Commit aca8a23d authored by Jeff Layton, committed by J. Bruce Fields

nfsd: add recurring workqueue job to clean the cache

It's not sufficient to only clean the cache when requests come in. What
if we have a flurry of activity and then the server goes idle? Add a
workqueue job that will clean the cache every RC_EXPIRE period.

Care is taken to only run this when we expect to have entries expiring.
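In workqueue terms, the lifecycle this patch builds is: schedule_delayed_work() from the hot path (a no-op if the job is already queued, so it is cheap per request), mod_delayed_work() or cancel_delayed_work() from the cleaner itself depending on whether entries remain, and cancel_delayed_work_sync() at shutdown. A minimal sketch of that pattern follows; touch_entry(), prune_expired(), cache_is_empty() and shutdown() are hypothetical stand-ins for the reply-cache internals, and only the workqueue calls mirror the diff below:

/* Sketch only: prune_expired() and cache_is_empty() are hypothetical stubs. */
#include <linux/workqueue.h>

static void cache_cleaner_func(struct work_struct *unused);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

static void touch_entry(void)
{
	/* No-op if already queued, so calling this on every request is cheap. */
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

static void cache_cleaner_func(struct work_struct *unused)
{
	prune_expired();		/* hypothetical: drop stale entries */
	if (cache_is_empty())		/* hypothetical predicate */
		cancel_delayed_work(&cache_cleaner);	/* nothing left to expire */
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void shutdown(void)
{
	/* Wait for any queued or running instance before tearing down. */
	cancel_delayed_work_sync(&cache_cleaner);
}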
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 2c6b691c
@@ -36,6 +36,7 @@ static inline u32 request_hash(u32 xid)
 }
 
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void	cache_cleaner_func(struct work_struct *unused);
 
 /*
  * locking for the reply cache:
@@ -43,6 +44,7 @@ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
  * Otherwise, when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
  * Put a cap on the size of the DRC based on the amount of available
@@ -131,6 +133,8 @@ void nfsd_reply_cache_shutdown(void)
 {
 	struct svc_cacherep	*rp;
 
+	cancel_delayed_work_sync(&cache_cleaner);
+
 	while (!list_empty(&lru_head)) {
 		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
 		nfsd_reply_cache_free_locked(rp);
@@ -146,13 +150,15 @@ void nfsd_reply_cache_shutdown(void)
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
 	list_move_tail(&rp->c_lru, &lru_head);
+	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
@@ -172,6 +178,42 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
 	time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
 }
 
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+	struct svc_cacherep *rp, *tmp;
+
+	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+		if (!nfsd_cache_entry_expired(rp) &&
+		    num_drc_entries <= max_drc_entries)
+			break;
+		nfsd_reply_cache_free_locked(rp);
+	}
+
+	/*
+	 * Conditionally rearm the job. If we cleaned out the list, then
+	 * cancel any pending run (since there won't be any work to do).
+	 * Otherwise, we rearm the job or modify the existing one to run in
+	 * RC_EXPIRE since we just ran the pruner.
+	 */
+	if (list_empty(&lru_head))
+		cancel_delayed_work(&cache_cleaner);
+	else
+		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+	spin_lock(&cache_lock);
+	prune_cache_entries();
+	spin_unlock(&cache_lock);
+}
+
 /*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
@@ -192,7 +234,6 @@ nfsd_cache_search(struct svc_rqst *rqstp)
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
-		    !nfsd_cache_entry_expired(rp) &&
 		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
 		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
 			return rp;
@@ -234,9 +275,12 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	if (!list_empty(&lru_head)) {
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries)
+		    num_drc_entries >= max_drc_entries) {
+			lru_put_end(rp);
+			prune_cache_entries();
 			goto setup_entry;
+		}
 	}
 
 	spin_unlock(&cache_lock);
 	rp = nfsd_reply_cache_alloc();