/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static int			cache_disabled = 1;
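
/*
 * Every svc_cacherep sits on the global LRU list for its whole lifetime
 * and, once used, on one of the cache_hash chains as well.  cache_disabled
 * starts out set and is cleared by nfsd_reply_cache_init(); it is set
 * again (permanently) if the cache is ever found to be wedged or corrupt.
 */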
/*
 * Calculate the hash index from an XID.
 */
static inline u32
request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
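
/*
 * (Folding the top byte of the XID into the low bits is presumably meant
 * to spread out clients whose XIDs differ mainly in the high byte, since
 * only the low log2(HASHSIZE) bits select the chain.)
 */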
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
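
/*
 * All CACHESIZE entries are preallocated up front so that the lookup
 * path never has to allocate memory (and thus never sleeps) while it
 * scans the cache under cache_lock.
 */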
int nfsd_reply_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			goto out_nomem;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
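
/*
 * Tear down the cache.  This is also used as the error path of
 * nfsd_reply_cache_init() above, so it must cope with a partially
 * constructed cache.
 */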
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(cache_hash);
	cache_hash = NULL;
}
/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}
/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
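
/*
 * (Refiling is needed because a recycled entry inherits a new XID, and
 * the XID determines which hash chain the entry belongs on.)
 */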
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
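/*
 * Returns RC_DOIT (process the request), RC_DROPIT (drop it silently;
 * the client will retransmit), or RC_REPLY (a cached reply was sent).
 */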
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct hlist_node	*hn;
	struct hlist_head 	*rh;
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			rtn;
	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rh = &cache_hash[request_hash(xid)];
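	/*
	 * A hit here means the client retransmitted: same XID, procedure,
	 * version, protocol and source address, and the entry is less than
	 * 120 seconds old.
	 */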
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;
	/* This loop shouldn't take more than a few iterations normally */
	{
	int	safe = 0;
	list_for_each_entry(rp, &lru_head, c_lru) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	}
	/* All entries on the LRU are in-progress. This should not happen */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);
	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;
found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);
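	/*
	 * Dropping the request is safe: the client treats it like a lost
	 * packet and retransmits, by which time the original reply should
	 * be complete and cached.
	 */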
	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;
	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;
	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;
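	/*
	 * len is now in 32-bit XDR words: the reply is measured from the
	 * status word onward, so "256 >> 2" below caps cached replies at
	 * 256 bytes and "len << 2" converts back to bytes.
	 */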
	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}
	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}