/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define	RPCDBG_FACILITY	RPCDBG_CACHE
void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	atomic_set(&h->refcnt, 0);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending,
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		cache_defer_req(rqstp, h);

	if (rv && h)
		detail->cache_put(h, detail);
	return rv;
}
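/*
 * Illustrative sketch, not part of the original file: how a caller might act
 * on cache_check()'s three outcomes.  The entry type and the policy taken in
 * each branch are hypothetical; the return-value semantics are those stated
 * in the comment above (on failure cache_check has already dropped the
 * reference, so the entry must not be touched again).
 */
#if 0
struct example_entry {
	struct cache_head	h;
	/* ... key and content fields ... */
};

static int example_use_entry(struct cache_detail *cd,
			     struct example_entry *e, struct cache_req *rq)
{
	switch (cache_check(cd, &e->h, rq)) {
	case 0:
		return 0;		/* valid and fresh: safe to use the content */
	case -EAGAIN:
		return -EAGAIN;		/* upcall pending; rq has been deferred */
	case -ENOENT:
	default:
		return -EPERM;		/* negative entry: treat as a denial */
	}
}
#endif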
static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
void cache_fresh(struct cache_detail *detail,
		 struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	if (!test_and_set_bit(CACHE_VALID, &head->flags))
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags))
		queue_loose(detail, head);
}
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for each cache_detail.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry time is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
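/*
 * Illustrative sketch, not part of the original file: the "nextcheck" rule
 * described above written out as two helpers.  cache_clean() below applies
 * the expiry half of this rule inline, and write_flush() simply pulls
 * nextcheck forward to "now" when a new flush_time is written.
 */
#if 0
static void example_note_expiry(struct cache_detail *cd, time_t expiry)
{
	/* never scan later than the earliest expiry seen so far */
	if (expiry < cd->nextcheck)
		cd->nextcheck = expiry;
}

static void example_note_flush(struct cache_detail *cd, time_t flush_time)
{
	/* an earlier flush_time forces an earlier scan */
	if (flush_time < cd->nextcheck)
		cd->nextcheck = flush_time;
}
#endif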
static LIST_HEAD(cache_list);
static spinlock_t cache_list_lock = SPIN_LOCK_UNLOCKED;
static struct cache_detail *current_detail;
static int current_index;

static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;

static void do_cache_clean(void *data);
static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = THIS_MODULE;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = THIS_MODULE;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	atomic_set(&cd->readers, 0);
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_work(&cache_cleaner);
}
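/*
 * Illustrative sketch, not part of the original file: roughly how a cache
 * user might wire up a cache_detail and register it.  The field names follow
 * those referenced elsewhere in this file; the table size and the ex_*
 * callbacks are hypothetical.
 */
#if 0
#define EX_HASH_SIZE	32
static struct cache_head *ex_table[EX_HASH_SIZE];

static struct cache_detail ex_cache = {
	.hash_size	= EX_HASH_SIZE,
	.hash_table	= ex_table,
	.name		= "example",
	.cache_put	= ex_put,	/* drop a reference, free on last put */
	.cache_request	= ex_request,	/* format an upcall line with qword_add()/qword_addhex() */
	.cache_parse	= ex_parse,	/* parse a reply line with qword_get() */
	.cache_show	= ex_show,	/* one line per entry for the "content" file */
};

static int __init ex_init(void)
{
	/* creates /proc/net/rpc/example/{flush,channel,content} */
	cache_register(&ex_cache);
	return 0;
}
#endif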
int cache_unregister(struct cache_detail *cd)
{
	/* there should be no cache entries by this point */
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work(&cache_cleaner);
		flush_scheduled_work();
	}
	return 0;
}
struct cache_detail *cache_find(char *name)
{
	struct list_head *l;

	spin_lock(&cache_list_lock);
	list_for_each(l, &cache_list) {
		struct cache_detail *cd = list_entry(l, struct cache_detail, others);

		if (strcmp(cd->name, name)==0) {
			atomic_inc(&cd->inuse);
			spin_unlock(&cache_list_lock);
			return cd;
		}
	}
	spin_unlock(&cache_list_lock);
	return NULL;
}
/* cache_drop must be called on any cache returned by
 * cache_find, after it has been used
 */
void cache_drop(struct cache_detail *detail)
{
	atomic_dec(&detail->inuse);
}
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = & current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = & ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds()
			    && ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (!atomic_read(&ch->refcnt))
				break;
		}
		if (ch) {
			cache_get(ch);
			clear_bit(CACHE_HASHED, &ch->flags);
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		if (ch)
			current_detail->cache_put(ch, current_detail);
		else
			current_index++;
	}
	spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(void *data)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = get_seconds()+1;
	detail->nextcheck = get_seconds();
	cache_flush();
}
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */
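/*
 * Illustrative sketch, not part of the original file: what a request that
 * supports deferral might look like.  The surrounding request structure and
 * the ex_drop()/ex_requeue() handlers are hypothetical; the embedded
 * cache_deferred_req and its revisit method are what cache_defer_req() and
 * cache_revisit_request() below operate on.
 */
#if 0
struct ex_request {
	struct cache_req	h;		/* h.defer points at ex_defer */
	struct cache_deferred_req deferred;
	/* ... enough request state to retry the lookup later ... */
};

static void ex_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct ex_request *exr = container_of(dreq, struct ex_request, deferred);

	if (too_many)
		ex_drop(exr);		/* queue overflowed: give up on this request */
	else
		ex_requeue(exr);	/* cache data arrived: retry the deferred lookup */
}

static struct cache_deferred_req *ex_defer(struct cache_req *req)
{
	struct ex_request *exr = container_of(req, struct ex_request, h);

	/* cache_defer_req() fills in ->item, ->recv_time and the list links */
	exr->deferred.revisit = ex_revisit;
	return &exr->deferred;
}
#endif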
spinlock_t cache_defer_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too much in the cache, randomly drop
		 * first or last
		 */
		if (net_random()&1)
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (test_bit(CACHE_VALID, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
}
void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/cache
 * On read, you get a full request, or block
 * On write, an update request is processed
 * Poll works if anything to read, and always allows write
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
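/*
 * Illustrative sketch, not part of the original file: the overall shape of a
 * user-space daemon driving one of these channel files.  This is user-space
 * code; the path and handle_request() are hypothetical, while the
 * one-request-per-read / one-reply-per-write behaviour is what cache_read()
 * and cache_write() below implement.
 */
#if 0
void example_channel_loop(void)
{
	char buf[4096];		/* an upcall request is at most one page */
	int fd = open("/proc/net/rpc/example/channel", O_RDWR);
	ssize_t n;

	for (;;) {
		/* a real daemon would poll() here; each read returns one whole request */
		n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			continue;
		/* parse the request, look up the answer, format one reply line into buf */
		n = handle_request(buf, n, sizeof(buf));
		if (n > 0)
			write(fd, buf, n);	/* the whole line is handed to ->cache_parse */
	}
}
#endif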
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_MUTEX(queue_io_sem);
struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
	char			*page;
};
static ssize_t
cache_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
	int err;

	if (ppos != &filp->f_pos)
		return -ESPIPE;

	if (count == 0)
		return 0;

	down(&queue_io_sem); /* protect against multiple concurrent
			      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		up(&queue_io_sem);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	if (rq->q.reader) BUG();
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		err = 0;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cd->cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;

	up(&queue_io_sem);
	return err ? err : count;
}
static ssize_t
cache_write(struct file *filp, const char *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	if (ppos != &filp->f_pos)
		return -ESPIPE;

	if (count == 0)
		return 0;
	if (count > PAGE_SIZE)
		return -EINVAL;

	down(&queue_io_sem);

	if (rp->page == NULL) {
		rp->page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (rp->page == NULL) {
			up(&queue_io_sem);
			return -ENOMEM;
		}
	}

	if (copy_from_user(rp->page, buf, count)) {
		up(&queue_io_sem);
		return -EFAULT;
	}
	if (count < PAGE_SIZE)
		rp->page[count] = '\0';

	if (cd->cache_parse)
		err = cd->cache_parse(cd, rp->page, count);
	else
		err = -EINVAL;

	up(&queue_io_sem);
	return err ? err : count;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLL_OUT | POLLWRNORM;
	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int *)arg);
}
static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp;
	struct cache_detail *cd = PDE(inode)->data;

	rp = kmalloc(sizeof(*rp), GFP_KERNEL);
	if (!rp)
		return -ENOMEM;
	rp->offset = 0;
	rp->q.reader = 1;
	rp->page = NULL;
	atomic_inc(&cd->readers);
	spin_lock(&queue_lock);
	list_add(&rp->q.list, &cd->queue);
	spin_unlock(&queue_lock);
	filp->private_data = rp;
	return 0;
}
static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	spin_lock(&queue_lock);
	if (rp->offset) {
		struct cache_queue *cq;
		for (cq = &rp->q; &cq->list != &cd->queue;
		     cq = list_entry(cq->list.next, struct cache_queue, list))
			if (!cq->reader) {
				container_of(cq, struct cache_request, q)
					->readers--;
				break;
			}
		rp->offset = 0;
	}
	list_del(&rp->q.list);
	spin_unlock(&queue_lock);

	if (rp->page)
		kfree(rp->page);

	filp->private_data = NULL;
	kfree(rp);

	cd->last_close = get_seconds();
	atomic_dec(&cd->readers);
	return 0;
}
static struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl, /* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};
static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				break;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			detail->cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c=*str++) && len)
		switch(c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
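/*
 * Illustrative sketch, not part of the original file: what the two quoting
 * helpers above produce.  The field values are made up; the escaping is that
 * implemented by qword_add() and qword_addhex().  Replacing the final
 * trailing space with '\n' to terminate the record is a common caller idiom
 * (an assumption here, consistent with "Record is terminated with newline").
 */
#if 0
static void example_format_request(char **bp, int *len)
{
	qword_add(bp, len, "example");		/* plain field: "example " */
	qword_add(bp, len, "host one");		/* space is octal-escaped: "host\040one " */
	qword_addhex(bp, len, "\x01\xab", 2);	/* binary field: "\x01ab " */
	(*bp)[-1] = '\n';			/* terminate the record with a newline */
}
#endif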
/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 60)
		/* nobody is listening */
		return -EINVAL;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
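/*
 * Illustrative sketch, not part of the original file: pulling the fields of
 * one message line back apart with qword_get().  The buffer contents and
 * field meanings are made up; qword_get() does the dequoting.
 */
#if 0
static void example_parse_line(char *mesg, int mlen)
{
	char key[128], val[128];
	char *p = mesg;
	int n;

	n = qword_get(&p, key, sizeof(key));	/* e.g. "host\040one" becomes "host one" */
	if (n < 0)
		return;				/* malformed field */
	n = qword_get(&p, val, sizeof(val));	/* next space-separated field */
	/* ... remaining fields (expiry, content) are read the same way ... */
}
#endif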
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return (void *)1;	/* header line */
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == (void *)1)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}
static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == (void *)1)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d\n",
			   cp->expiry_time, atomic_read(&cp->refcnt));
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}
struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file)
{
	int res;
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = kmalloc(sizeof(*han), GFP_KERNEL);
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;

	res = seq_open(file, &cache_content_op);
	if (res)
		kfree(han);
	else
		((struct seq_file *)file->private_data)->private = han;

	return res;
}
static int content_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct handle *han = m->private;
	kfree(han);
	m->private = NULL;
	return seq_release(inode, file);
}
static struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release,
};
static ssize_t read_flush(struct file *file, char *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count) len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}
static ssize_t write_flush(struct file *file, const char *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}
static struct file_operations cache_flush_operations = {
	.read		= read_flush,
	.write		= write_flush,
};
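/*
 * Illustrative sketch, not part of the original file: using the "flush" file
 * that read_flush()/write_flush() implement, from user space.  The cache
 * name in the path is hypothetical.  Writing a time (in seconds) sets
 * flush_time, so entries last refreshed before that time are treated as
 * invalid, and nextcheck is pulled forward so the cleaner scans promptly.
 */
#if 0
void example_flush_cache(time_t now)
{
	char tbuf[20];
	int fd = open("/proc/net/rpc/example/flush", O_WRONLY);

	snprintf(tbuf, sizeof(tbuf), "%ld\n", (long)now);
	write(fd, tbuf, strlen(tbuf));
	close(fd);
}
#endif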