/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);

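/*
 * Illustrative sketch (not part of the original file): a cache user
 * typically embeds a cache_head in its own structure and wraps the
 * lookup.  The 'ip_map' names below are assumed for illustration.
 *
 *	struct ip_map {
 *		struct cache_head	h;
 *		char			m_class[8];
 *		struct in_addr		m_addr;
 *	};
 *
 *	static struct ip_map *ip_map_lookup(struct cache_detail *cd,
 *					    struct ip_map *key, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(cd, &key->h, hash);
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct ip_map, h);
 *	}
 */
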
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		return -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else
			return 0;
	}
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
					cache_fresh_unlocked(h, detail);
					rv = -ENOENT;
				}
				break;
			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (cache_defer_req(rqstp, h) < 0) {
			/* Request is not deferred */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);

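/*
 * Sketch of a typical caller (illustrative, not from this file): the
 * caller holds a reference on 'h'; any non-zero return means
 * cache_check() has already dropped that reference.
 *
 *	switch (cache_check(cd, h, &rqstp->rq_chandle)) {
 *	case 0:		// valid entry, reference still held
 *		break;
 *	case -EAGAIN:	// deferred; revisited when the upcall completes
 *		return drop_request();
 *	case -ENOENT:	// negative entry
 *	case -ETIMEDOUT:
 *	default:
 *		return reject_request();
 *	}
 *
 * drop_request()/reject_request() stand in for whatever the caller
 * actually does in those cases.
 */
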
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
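/*
 * Worked example (illustrative): with get_seconds() == T, starting a
 * scan sets nextcheck to T+1800.  If the scan then meets entries
 * expiring at T+40 and T+900, nextcheck ends up at T+41, so the table
 * is left alone until T+41 unless flush_time is set earlier than that.
 */
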
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}

static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
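/*
 * Illustrative sketch (assumed, not from this file): the owner embeds
 * a cache_deferred_req and supplies a revisit method; 'my_deferred'
 * and 'my_revisit' are names invented for this example.
 *
 *	struct my_deferred {
 *		struct cache_deferred_req	handle;
 *		// ... saved request state ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *md =
 *			container_of(dreq, struct my_deferred, handle);
 *		// requeue the saved request, or drop it if 'too_many'
 *	}
 */
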
#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	discard = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		discard = list_entry(cache_defer_list.prev,
				     struct cache_deferred_req, recent);
		list_del_init(&discard->recent);
		list_del_init(&discard->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (discard)
		/* there was one too many */
		discard->revisit(discard, 1);

	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
	return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del_init(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del_init(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
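/*
 * Example exchange (illustrative; the exact field layout belongs to
 * each individual cache, this sketch follows the auth.unix.ip style):
 *
 *   read() from the channel might return the request line
 *	"nfsd 10.0.0.1\n"
 *   and the daemon answers by writing back
 *	"nfsd 10.0.0.1 <expiry-seconds> <export-domain>\n"
 *   which the cache's cache_parse() turns into a fresh, valid entry.
 */
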
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLL_OUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

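/*
 * Example (illustrative): qword_add(&bp, &len, "a b") emits the
 * quoted field "a\040b " - the embedded space becomes octal \040 and
 * a separating space is appended.
 */
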
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

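/*
 * Example (illustrative): qword_addhex(&bp, &len, "\x01\xfe", 2)
 * emits "\x01fe " - a leading \x, two hex digits per byte, then the
 * field separator.
 */
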
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
		void (*cache_request)(struct cache_detail *,
				      struct cache_head *,
				      char **,
				      int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

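/*
 * Illustrative sketch (assumed shape of a cache_request callback, not
 * from this file): format the key with the qword helpers and
 * terminate the record with a newline; the helpers drive 'len'
 * negative on overflow, which makes the caller return -EAGAIN.
 * 'my_entry' and its fields are invented for this example.
 *
 *	static void my_cache_request(struct cache_detail *cd,
 *				     struct cache_head *h,
 *				     char **bpp, int *blen)
 *	{
 *		struct my_entry *e = container_of(h, struct my_entry, h);
 *
 *		qword_add(bpp, blen, e->name);
 *		(*bpp)[-1] = '\n';	// replace the trailing space
 *	}
 */
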
/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

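/*
 * Example (illustrative): given "path\0751 next", qword_get() copies
 * "path=1" into dest (\075 is octal for '='), returns 6, and leaves
 * *bpp pointing at "next".  A "\x..." field is decoded as raw bytes.
 */
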
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

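/*
 * Usage example (illustrative): writing a time_t to the flush file
 * invalidates every entry whose last_refresh is older than that time,
 * so from a shell
 *
 *	echo $(date +%s) > /proc/net/rpc/auth.unix.ip/flush
 *
 * flushes the whole auth.unix.ip cache.
 */
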
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.ioctl		= cache_ioctl_procfs,	/* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
	struct proc_dir_entry *p;

	cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
	return 0;
}
#endif

int cache_register(struct cache_detail *cd)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register);

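/*
 * Illustrative sketch (assumed, not from this file): a minimal
 * registration.  The cache_detail below only fills the fields this
 * file relies on; 'my_table' and all my_* callbacks are hypothetical.
 *
 *	static struct cache_head *my_table[64];
 *	static struct cache_detail my_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 64,
 *		.hash_table	= my_table,
 *		.name		= "my_cache",
 *		.cache_upcall	= my_upcall,
 *		.cache_parse	= my_parse,
 *		.cache_show	= my_show,
 *		.match		= my_match,
 *		.init		= my_init,
 *		.update		= my_update,
 *		.alloc		= my_alloc,
 *	};
 *
 *	ret = cache_register(&my_cache);
 */
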
void cache_unregister(struct cache_detail *cd)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.ioctl		= cache_ioctl_pipefs,	/* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, mode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	sunrpc_init_cache_detail(cd);
	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else {
		sunrpc_destroy_cache_detail(cd);
		ret = PTR_ERR(dir);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);