/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY RPCDBG_CACHE
static void cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	cache_init(new);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	detail->init(new, key);
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL(sunrpc_cache_lookup);
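
/*
 * An illustrative caller sketch (hypothetical names, not from this
 * file): cache implementations normally wrap sunrpc_cache_lookup in a
 * type-safe helper built around container_of():
 *
 *	static struct my_ent *my_lookup(struct cache_detail *cd,
 *					struct my_ent *key, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(cd, &key->h, hash);
 *		if (ch)
 *			return container_of(ch, struct my_ent, h);
 *		return NULL;
 *	}
 */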
static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	return !test_and_set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail, int new)
{
	if (new)
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		queue_loose(detail, head);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;
	int is_new;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			is_new = cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail, is_new);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	cache_get(tmp);
	is_new = cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail, is_new);
	cache_fresh_unlocked(old, detail, 0);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL(sunrpc_cache_update);
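
/*
 * Sketch of the matching update helper (again hypothetical names):
 * the caller passes a template 'new' plus the referenced 'old' entry;
 * sunrpc_cache_update consumes the reference on 'old' and returns a
 * referenced entry (or NULL on allocation failure):
 *
 *	static struct my_ent *my_update(struct cache_detail *cd,
 *					struct my_ent *new,
 *					struct my_ent *old, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
 *		if (ch)
 *			return container_of(ch, struct my_ent, h);
 *		return NULL;
 *	}
 */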
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending,
 *	-ENOENT if cache entry was negative.
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_unlocked(h, detail,
						cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		cache_defer_req(rqstp, h);

	if (rv)
		cache_put(h, detail);
	return rv;
}
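
/*
 * Typical caller pattern, as a sketch (field names are assumptions
 * based on how sunrpc servers hold a struct cache_req handle): note
 * that on any non-zero return cache_check has already dropped the
 * caller's reference.
 *
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		// entry is valid: use 'ent', then cache_put() it
 *		break;
 *	case -EAGAIN:	// upcall pending; request has been deferred
 *	case -ENOENT:	// negative entry
 *	default:
 *		// 'ent' has already been put; report the error
 *		break;
 *	}
 */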
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * it presents.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
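
/*
 * Worked example of the nextcheck rules above: a scan starting at
 * t=1000 first sets nextcheck to t=2800 (30 minutes ahead).  If it
 * then sees entries expiring at t=1300 and t=1200, nextcheck ends up
 * at 1201, so the table is left alone until get_seconds() reaches
 * 1201; setting a flush_time of 1100 meanwhile would pull nextcheck
 * back to 1100 and make the table eligible sooner.
 */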
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;

static void do_cache_clean(void *data);
static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = cd->owner;
		cd->channel_ent = cd->content_ent = NULL;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		cd->flush_ent = p;
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = cd->owner;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->channel_ent = p;
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->content_ent = p;
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_work(&cache_cleaner);
}
int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		if (cd->flush_ent)
			remove_proc_entry("flush", cd->proc_ent);
		if (cd->channel_ent)
			remove_proc_entry("channel", cd->proc_ent);
		if (cd->content_ent)
			remove_proc_entry("content", cd->proc_ent);

		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work(&cache_cleaner);
		flush_scheduled_work();
	}
	return 0;
}
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns  1 if it cleaned something,
 *             0 if it didn't find anything this time
 *            -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds()
			    && ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(void *data)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
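
/*
 * Sketch of the expected embedding (names are hypothetical): the
 * deferred form is a container holding whatever state the transport
 * needs to replay the request, with the cache_deferred_req inside:
 *
 *	struct my_deferred {
 *		struct cache_deferred_req handle;  // ->revisit set by owner
 *		// ... transport-specific request state ...
 *	};
 *
 * req->defer(req) returns &d->handle, and the revisit() method uses
 * container_of() to recover the full request.
 */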
#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */
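
/*
 * DFR_HASH folds two shifted copies of the cache_head address together
 * so the always-zero low alignment bits don't dominate.  As a worked
 * example (assuming a 32-bit box with 4K pages, where DFR_HASHSIZE is
 * 512): item == 0x12345670 hashes to
 *	(0x01234567 ^ 0x000091a2) % 512 == 0x0123d4c5 % 512 == 197.
 */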
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
static void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too much in the cache, randomly drop
		 * first or last
		 */
		if (net_random()&1)
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
}
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/cache
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
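
/*
 * An illustrative exchange (the exact field layout belongs to each
 * cache's cache_request/cache_parse pair, so treat this as a sketch):
 * a user-space daemon poll()s the channel file, read()s one request
 * line such as
 *
 *	nfsd 192.0.2.1
 *
 * resolves it, and write()s back a reply line carrying the key, an
 * expiry time and the content, e.g.
 *
 *	nfsd 192.0.2.1 1136300000 mydomain
 */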
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&queue_io_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		err = 0;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}
static char write_buf[8192]; /* protected by queue_io_mutex */
static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	if (count == 0)
		return 0;
	if (count >= sizeof(write_buf))
		return -EINVAL;

	mutex_lock(&queue_io_mutex);

	if (copy_from_user(write_buf, buf, count)) {
		mutex_unlock(&queue_io_mutex);
		return -EFAULT;
	}
	write_buf[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, write_buf, count);
	else
		err = -EINVAL;

	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLL_OUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = NULL;

	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		struct cache_detail *cd = PDE(inode)->data;

		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	return 0;
}
static struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl,	/* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};
static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x.
 * Record is terminated with newline.
 */
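
/*
 * Example of the quoting, derived from the routines below: qword_add
 * of the string "a b" emits "a\040b " (the space quoted as octal, the
 * field terminated by a space), and qword_addhex of the two bytes
 * 0xde 0xad emits "\xdead ".
 */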
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail);
	}
}
/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache.
 */
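
/*
 * For example (illustrative only; each cache owns its field layout),
 * a reply written to the auth.unix.ip cache might look like
 *
 *	nfsd 127.0.0.1 1136300000 localhost
 *
 * i.e. class, key (an IP address), expiry in seconds, then content
 * (the domain name).
 */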
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
	struct cache_detail *cd;
};
static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}
static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}
static struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file)
{
	int res;
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = kmalloc(sizeof(*han), GFP_KERNEL);
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;

	res = seq_open(file, &cache_content_op);
	if (res)
		kfree(han);
	else
		((struct seq_file *)file->private_data)->private = han;

	return res;
}
static int content_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct handle *han = m->private;
	kfree(han);
	m->private = NULL;
	return seq_release(inode, file);
}
static struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release,
};
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count) len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		len = -EFAULT;
	else
		*ppos += len;
	return len;
}
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}
static struct file_operations cache_flush_operations = {
	.open		= nonseekable_open,
	.read		= read_flush,
	.write		= write_flush,
};