/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>

/* Size of reply cache. Common values are:
 *	4.3BSD:    128
 *	4.4BSD:    256
 *	Solaris2:  1024
 *	DEC Unix:  512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
#define REQHASH(xid)		((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
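
/*
 * REQHASH folds the top byte of the XID into the low-order bits before
 * masking, so clients that hand out sequential XIDs still spread evenly
 * across all HASHSIZE buckets.
 */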

struct nfscache_head {
	struct svc_cacherep *	next;
	struct svc_cacherep *	prev;
};
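
/*
 * All entries live in one flat array (nfscache); lru_head/lru_tail
 * thread them in order of last use, while hash_list indexes them by
 * request XID.
 */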
static struct nfscache_head *	hash_list;
static struct svc_cacherep *	lru_head;
static struct svc_cacherep *	lru_tail;
static struct svc_cacherep *	nfscache;
static int			cache_disabled = 1;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED;

void
nfsd_cache_init(void)
{
	struct svc_cacherep	*rp;
	struct nfscache_head	*rh;
	size_t			i;
	unsigned long		order;

	i = CACHESIZE * sizeof (struct svc_cacherep);
	for (order = 0; (PAGE_SIZE << order) < i; order++)
		;
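	/* "order" is now the smallest power-of-two page count covering the
	 * whole array; one physically contiguous block holds every entry. */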
	nfscache = (struct svc_cacherep *)
		__get_free_pages(GFP_KERNEL, order);
	if (!nfscache) {
		printk (KERN_ERR "nfsd: cannot allocate %Zd bytes for reply cache\n", i);
		return;
	}
	memset(nfscache, 0, i);

	i = HASHSIZE * sizeof (struct nfscache_head);
	hash_list = kmalloc (i, GFP_KERNEL);
	if (!hash_list) {
		free_pages ((unsigned long)nfscache, order);
		nfscache = NULL;
		printk (KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n", i);
		return;
	}

	for (i = 0, rh = hash_list; i < HASHSIZE; i++, rh++)
		rh->next = rh->prev = (struct svc_cacherep *) rh;
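	/* The cast above lets each nfscache_head serve as the sentinel of a
	 * circular chain; it relies on next/prev lining up with the
	 * c_hash_next/c_hash_prev members at the start of svc_cacherep. */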

	for (i = 0, rp = nfscache; i < CACHESIZE; i++, rp++) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		rp->c_hash_next =
		rp->c_hash_prev = rp;
		rp->c_lru_next = rp + 1;
		rp->c_lru_prev = rp - 1;
	}
	lru_head = nfscache;
	lru_tail = nfscache + CACHESIZE - 1;
	lru_head->c_lru_prev = NULL;
	lru_tail->c_lru_next = NULL;

	cache_disabled = 0;
}

void
nfsd_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	size_t			i;
	unsigned long		order;

	for (rp = lru_head; rp; rp = rp->c_lru_next) {
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
	}

	cache_disabled = 1;

	i = CACHESIZE * sizeof (struct svc_cacherep);
	for (order = 0; (PAGE_SIZE << order) < i; order++)
		;
	free_pages ((unsigned long)nfscache, order);
	nfscache = NULL;
	kfree (hash_list);
	hash_list = NULL;
}

/*
 * Move cache entry to front of LRU list
 */
static void
lru_put_front(struct svc_cacherep *rp)
{
	struct svc_cacherep	*prev = rp->c_lru_prev,
				*next = rp->c_lru_next;

	if (prev)
		prev->c_lru_next = next;
	else
		lru_head = next;
	if (next)
		next->c_lru_prev = prev;
	else
		lru_tail = prev;

	rp->c_lru_next = lru_head;
	rp->c_lru_prev = NULL;
	if (lru_head)
		lru_head->c_lru_prev = rp;
	lru_head = rp;
}
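
/*
 * Note that the LRU list, unlike the hash chains, is NULL-terminated at
 * both ends, which is why lru_put_front has to patch lru_head and
 * lru_tail explicitly.
 */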

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	struct svc_cacherep	*prev = rp->c_hash_prev,
				*next = rp->c_hash_next;
	struct nfscache_head	*head = hash_list + REQHASH(rp->c_xid);

	prev->c_hash_next = next;
	next->c_hash_prev = prev;

	rp->c_hash_next = head->next;
	rp->c_hash_prev = (struct svc_cacherep *) head;
	head->next->c_hash_prev = rp;
	head->next = rp;
}
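
/*
 * hash_refile needs no NULL checks: every hash chain is circular through
 * its nfscache_head sentinel, so both the unlink and the relink are
 * unconditional.
 */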

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct svc_cacherep	*rh, *rp;
	u32			xid = rqstp->rq_xid,
				proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}
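
	/* A hit below must match the full (xid, proc, prot, vers, client
	 * address) tuple and be under two minutes old; the XID alone repeats
	 * too easily across clients and reboots. */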
	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = rh = (struct svc_cacherep *) &hash_list[REQHASH(xid)];
	while ((rp = rp->c_hash_next) != rh) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr)) == 0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
	int	safe = 0;
	for (rp = lru_tail; rp; rp = rp->c_lru_prev) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	}

	/* This should not happen */
	if (rp == NULL) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rp->c_addr = rqstp->rq_addr;
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_front(rp);
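
	/* If the original call is still being processed, or this retransmission
	 * arrived suspiciously soon, drop it silently: the client will simply
	 * retransmit again later, by which time the reply should be cached. */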
	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
	struct svc_cacherep	*rp;
	struct kvec		*resv = &rqstp->rq_res.head[0], *cachv;
	int			len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}
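
	/* len counts 32-bit XDR words here (note the len >>= 2 above), so the
	 * cap works out to 256 bytes of cached reply data. */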
	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
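
	/* Only now, with the reply data safely in place, is the entry
	 * published as RC_DONE under cache_lock, making it visible to
	 * nfsd_cache_lookup. */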
	spin_lock(&cache_lock);
	lru_put_front(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}