Merge with Linux 2.5.74.
[linux-2.6/linux-mips.git] / net/sunrpc/cache.c
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY RPCDBG_CACHE
void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	atomic_set(&h->refcnt, 0);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}


static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending,
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		cache_defer_req(rqstp, h);

	if (rv && h)
		detail->cache_put(h, detail);
	return rv;
}
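/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical caller holds a reference on a cache_head and lets cache_check()
 * decide whether it may be used.  On any non-zero return cache_check() has
 * already dropped that reference through the cache's cache_put method, so
 * the caller must not put it again.  The function and names below are
 * hypothetical.
 */
#if 0
static int example_use_entry(struct cache_detail *cd, struct cache_head *h,
			     struct cache_req *rqstp)
{
	switch (cache_check(cd, h, rqstp)) {
	case 0:
		/* valid, positive entry: use it, then cache_put() it */
		return 0;
	case -EAGAIN:
		/* upcall pending: the request has been deferred */
		return -EAGAIN;
	default:
		/* -ENOENT: negative or unusable entry, reference released */
		return -ENOENT;
	}
}
#endif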
static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

void cache_fresh(struct cache_detail *detail,
		 struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	if (!test_and_set_bit(CACHE_VALID, &head->flags))
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags))
		queue_loose(detail, head);
}
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
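/*
 * Worked example (editor's addition, not part of the original file):
 * suppose a scan of a table starts at t=1000.  cache_clean() below first
 * pushes nextcheck 30 minutes ahead (t=2800).  If the scan then meets
 * entries expiring at t=1500 and t=1200, nextcheck is pulled back to 1201,
 * so the table is not scanned again until get_seconds() >= 1201, unless a
 * new flush_time is written, which resets nextcheck to the current time.
 */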
static LIST_HEAD(cache_list);
static spinlock_t cache_list_lock = SPIN_LOCK_UNLOCKED;
static struct cache_detail *current_detail;
static int current_index;

static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;

static void do_cache_clean(void *data);
static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);

void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = THIS_MODULE;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = THIS_MODULE;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_work(&cache_cleaner);
}
int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* module must be being unloaded, so it's safe to kill the worker */
		cancel_delayed_work(&cache_cleaner);
		flush_scheduled_work();
	}
	return 0;
}
struct cache_detail *cache_find(char *name)
{
	struct list_head *l;

	spin_lock(&cache_list_lock);
	list_for_each(l, &cache_list) {
		struct cache_detail *cd = list_entry(l, struct cache_detail, others);

		if (strcmp(cd->name, name)==0) {
			atomic_inc(&cd->inuse);
			spin_unlock(&cache_list_lock);
			return cd;
		}
	}
	spin_unlock(&cache_list_lock);
	return NULL;
}

/* cache_drop must be called on any cache returned by
 * cache_find, after it has been used
 */
void cache_drop(struct cache_detail *detail)
{
	atomic_dec(&detail->inuse);
}
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds()
			    && ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (!atomic_read(&ch->refcnt))
				break;
		}
		if (ch) {
			cache_get(ch);
			clear_bit(CACHE_HASHED, &ch->flags);
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		if (ch)
			current_detail->cache_put(ch, current_detail);
		else
			current_index++;
	}
	spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(void *data)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = get_seconds()+1;
	detail->nextcheck = get_seconds();
	cache_flush();
}
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
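/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a request owner supplies the ->defer and ->revisit hooks roughly as
 * below.  The embedded struct cache_deferred_req is what this file keeps;
 * its revisit() method is called with too_many=0 once the cache entry is
 * usable, or with too_many=1 if the deferred request is being discarded.
 * The wrapper structure and names are hypothetical.
 */
#if 0
struct example_deferred {
	struct cache_deferred_req handle;	/* stored by cache_defer_req() */
	/* ... enough state to replay the original request ... */
};

static void example_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct example_deferred *ed =
		container_of(dreq, struct example_deferred, handle);
	/* requeue the saved request if !too_many, otherwise just drop it */
	kfree(ed);
}

static struct cache_deferred_req *example_defer(struct cache_req *req)
{
	struct example_deferred *ed = kmalloc(sizeof(*ed), GFP_ATOMIC);

	if (ed == NULL)
		return NULL;	/* cache_defer_req() copes with NULL */
	ed->handle.revisit = example_revisit;
	/* ed->handle.owner would also be set so cache_clean_deferred()
	 * can find these entries when the owner goes away */
	return &ed->handle;
}
#endif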
#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */

spinlock_t cache_defer_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too much in the cache, randomly drop
		 * first or last
		 */
		if (net_random()&1)
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (test_bit(CACHE_VALID, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
}
void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/cache
 * On read, you get a full request, or block
 * On write, an update request is processed
 * Poll works if anything to read, and always allows write
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
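/*
 * Illustrative user-space sketch (editor's addition, not part of the
 * original file): a daemon serving one of these caches opens the "channel"
 * file that cache_register() creates under /proc/net/rpc/<cachename>/,
 * reads one newline-terminated request record at a time, and writes back
 * a single reply line for cache_parse().  The cache name and the reply
 * contents below are hypothetical.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

static int example_channel_loop(const char *cachename)
{
	char path[256], buf[4096];
	const char *reply = "key\\040with\\040spaces 2147483647 value\n";
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/net/rpc/%s/channel", cachename);
	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;
	/* each read blocks until an upcall request has been queued */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		/* ... look up the answer for the request in buf ... */
		/* fields in the reply are quoted as in qword_add() below */
		if (write(fd, reply, strlen(reply)) < 0)
			break;
	}
	close(fd);
	return 0;
}
#endif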
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_MUTEX(queue_io_sem);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
	char			*page;
};
static ssize_t
cache_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
	int err;

	if (ppos != &filp->f_pos)
		return -ESPIPE;

	if (count == 0)
		return 0;

	down(&queue_io_sem); /* protect against multiple concurrent
			      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		up(&queue_io_sem);
		if (rp->offset)
			BUG();
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	if (rq->q.reader) BUG();
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cd->cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	up(&queue_io_sem);
	return err ? err : count;
}
static ssize_t
cache_write(struct file *filp, const char *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	if (ppos != &filp->f_pos)
		return -ESPIPE;

	if (count == 0)
		return 0;
	if (count > PAGE_SIZE)
		return -EINVAL;

	down(&queue_io_sem);

	if (rp->page == NULL) {
		rp->page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (rp->page == NULL) {
			up(&queue_io_sem);
			return -ENOMEM;
		}
	}

	if (copy_from_user(rp->page, buf, count)) {
		up(&queue_io_sem);
		return -EFAULT;
	}
	if (count < PAGE_SIZE)
		rp->page[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, rp->page, count);
	else
		err = -EINVAL;

	up(&queue_io_sem);
	return err ? err : count;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLL_OUT | POLLWRNORM;
	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD)
		return -EINVAL;
	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int *)arg);
}
static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp;
	struct cache_detail *cd = PDE(inode)->data;

	rp = kmalloc(sizeof(*rp), GFP_KERNEL);
	if (!rp)
		return -ENOMEM;
	rp->page = NULL;
	rp->offset = 0;
	rp->q.reader = 1;
	atomic_inc(&cd->readers);
	spin_lock(&queue_lock);
	list_add(&rp->q.list, &cd->queue);
	spin_unlock(&queue_lock);
	filp->private_data = rp;
	return 0;
}
static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	spin_lock(&queue_lock);
	if (rp->offset) {
		struct cache_queue *cq;
		for (cq = &rp->q; &cq->list != &cd->queue;
		     cq = list_entry(cq->list.next, struct cache_queue, list))
			if (!cq->reader) {
				container_of(cq, struct cache_request, q)
					->readers--;
				break;
			}
		rp->offset = 0;
	}
	list_del(&rp->q.list);
	spin_unlock(&queue_lock);

	if (rp->page)
		kfree(rp->page);

	filp->private_data = NULL;
	kfree(rp);

	cd->last_close = get_seconds();
	atomic_dec(&cd->readers);
	return 0;
}
static struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl, /* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};
static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				break;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			detail->cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
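/*
 * Worked example (editor's addition, not part of the original file):
 * qword_add(&bp, &len, "host name") emits the field "host\040name "
 * (the space quoted as octal \040, with a trailing space as separator),
 * while qword_addhex(&bp, &len, "\x01\xab", 2) emits "\x01ab ".  Either
 * form is reversed by qword_get() below.
 */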
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c=*str++) && len)
		switch(c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len<1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 60)
		/* nobody is listening */
		return -EINVAL;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;
	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
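/*
 * Hypothetical example (editor's addition, not part of the original file):
 * a reply line written into a cache's channel file could look like
 *
 *   example\040key 1057862400 examplevalue
 *
 * i.e. whitespace-separated fields with the expiry time in seconds since
 * the epoch, and any space, tab, newline or backslash inside a field
 * quoted as \nnn octal (or the whole field hexified with \x).  The exact
 * field layout is defined by each cache's cache_parse() method.
 */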
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
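/*
 * Editor's note (not part of the original file): the resulting "content"
 * file shows one header line from cache_show(m, cd, NULL) followed by one
 * line per entry.  With RPCDBG_CACHE debugging enabled, c_show() below
 * also prints "# expiry=<time> refcnt=<count>" before each entry, and
 * entries that cache_check() reports as unusable (pending, expired or
 * negative) are prefixed with "# ".
 */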
struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return (void *)1;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch=cd->hash_table[hash]; ch; ch=ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while(hash < cd->hash_size &&
		cd->hash_table[hash]==NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == (void *)1)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle*)m->private)->cd;
	read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == (void *)1)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d\n",
			   cp->expiry_time, atomic_read(&cp->refcnt));
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file)
{
	int res;
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = kmalloc(sizeof(*han), GFP_KERNEL);
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;

	res = seq_open(file, &cache_content_op);
	if (res)
		kfree(han);
	else
		((struct seq_file *)file->private_data)->private = han;

	return res;
}

static int content_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct handle *han = m->private;
	kfree(han);
	m->private = NULL;
	return seq_release(inode, file);
}

static struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release,
};
static ssize_t read_flush(struct file *file, char *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count) len = count;
	if (copy_to_user(buf, (void*)(tbuf+p), len))
		len = -EFAULT;
	else
		*ppos += len;
	return len;
}
static ssize_t write_flush(struct file *file, const char *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;
	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}
static struct file_operations cache_flush_operations = {
	.read		= read_flush,
	.write		= write_flush,
};