/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 */

#define __NO_VERSION__
#include <linux/module.h>

#define __KERNEL_SYSCALLS__
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
static int			rpc_task_id = 0;
#endif
/*
 * We give RPC the same get_free_pages priority as NFS
 */
#define GFP_RPC			GFP_NFS

static void			__rpc_default_timer(struct rpc_task *task);
static void			rpciod_killall(void);

/*
 * When an asynchronous RPC task is activated within a bottom half
 * handler, or while executing another RPC task, it is put on
 * schedq, and rpciod is woken up.
 */
static struct rpc_wait_queue	schedq = RPC_INIT_WAITQ("schedq");

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static struct rpc_wait_queue	childq = RPC_INIT_WAITQ("childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue	delay_queue = RPC_INIT_WAITQ("delayq");

/*
 * All RPC tasks are linked into this list
 */
static struct rpc_task *	all_tasks = NULL;

/*
 * rpciod-related stuff
 */
static struct wait_queue *	rpciod_idle = NULL;
static struct wait_queue *	rpciod_killer = NULL;
static struct semaphore		rpciod_sema = MUTEX;
static unsigned int		rpciod_users = 0;
static pid_t			rpciod_pid = 0;
static int			rpc_inhibit = 0;

/*
 * This is the last-ditch buffer for NFS swap requests
 */
static u32			swap_buffer[PAGE_SIZE >> 2];
static int			swap_buffer_used = 0;
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
void
rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait) {
		if (task->tk_rpcwait != queue)
			printk(KERN_WARNING "RPC: doubly enqueued task!\n");
		return;
	}
	if (RPC_IS_SWAPPER(task))
		rpc_insert_list(&queue->task, task);
	else
		rpc_append_list(&queue->task, task);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
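/*
 * Illustrative sketch (not part of the original file): callers serialize
 * access to a wait queue themselves, typically by disabling interrupts
 * around the call, as __rpc_sleep_on() below does.  The function name is
 * hypothetical.
 */
#if 0
static void
example_enqueue(struct rpc_wait_queue *q, struct rpc_task *task)
{
	unsigned long oldflags;

	save_flags(oldflags); cli();	/* protect the wait queue */
	rpc_add_wait_queue(q, task);	/* swapper tasks jump to the head */
	restore_flags(oldflags);
}
#endif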
/*
 * Remove request from queue.
 * Note: must be called with interrupts disabled.
 */
void
rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!(queue = task->tk_rpcwait))
		return;
	rpc_remove_list(&queue->task, task);
	task->tk_rpcwait = NULL;

	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Set up a timer for the current task.
 */
inline void
rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	unsigned long	expires = jiffies + task->tk_timeout;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);
	if (!timer)
		timer = __rpc_default_timer;
	if (expires < jiffies) {
		printk(KERN_ERR "RPC: bad timeout value %ld - setting to 10 sec!\n",
					task->tk_timeout);
		expires = jiffies + 10 * HZ;
	}
	task->tk_timer.expires  = expires;
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) timer;
	task->tk_timer.prev     = NULL;
	task->tk_timer.next     = NULL;
	add_timer(&task->tk_timer);
}

/*
 * Delete any timer for the current task.
 * Must be called with interrupts off.
 */
inline void
rpc_del_timer(struct rpc_task *task)
{
	if (task->tk_timeout) {
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
		del_timer(&task->tk_timer);
		task->tk_timeout = 0;
	}
}
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * interrupts disabled to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		rpc_add_wait_queue(&schedq, task);
		wake_up(&rpciod_idle);
	} else {
		wake_up(&task->tk_wait);
	}
	task->tk_flags |= RPC_TASK_RUNNING;
}

/*
 * For other people who may need to wake the I/O daemon
 * but should (for now) know nothing about its innards
 */
void rpciod_wake_up(void)
{
	if (rpciod_pid == 0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	wake_up(&rpciod_idle);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	unsigned long	oldflags;

	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	/*
	 * Protect the execution below.
	 */
	save_flags(oldflags); cli();

	rpc_add_wait_queue(q, task);
	task->tk_callback = action;
	if (task->tk_timeout)
		rpc_add_timer(task, timer);
	task->tk_flags &= ~RPC_TASK_RUNNING;

	restore_flags(oldflags);
	return;
}
void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	__rpc_sleep_on(q, task, action, timer);
}
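/*
 * Illustrative sketch (not part of the original file): a typical caller
 * sets tk_timeout first, then sleeps on a queue with a callback to run on
 * wake-up and a timer handler to run on timeout.  The queue and function
 * names here are hypothetical.
 */
#if 0
static void
example_wait_for_event(struct rpc_task *task)
{
	task->tk_timeout = 5 * HZ;	/* timer armed by __rpc_sleep_on() */
	rpc_sleep_on(&example_waitq, task, example_resume, example_timeout);
}
#endif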
/*
 * Wake up a single task -- must be invoked with bottom halves off.
 *
 * It would probably suffice to cli/sti the del_timer and remove_wait_queue
 * operations individually.
 */
static void
__rpc_wake_up(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up (now %ld inh %d)\n",
					task->tk_pid, jiffies, rpc_inhibit);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to wake up non-existing task!\n");
		rpc_debug = ~0;
		return;
	}
#endif
	rpc_del_timer(task);
	if (task->tk_rpcwait != &schedq)
		rpc_remove_wait_queue(task);
	if (!RPC_IS_RUNNING(task)) {
		rpc_make_runnable(task);
		task->tk_flags |= RPC_TASK_CALLBACK;
	}
	dprintk("RPC: __rpc_wake_up done\n");
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	task->tk_timeout = 0;
	__rpc_wake_up(task);
}
/*
 * Wake up the specified task
 */
void
rpc_wake_up_task(struct rpc_task *task)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	__rpc_wake_up(task);
	restore_flags(oldflags);
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;
	struct rpc_task	*task;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	save_flags(oldflags); cli();
	if ((task = queue->task) != 0)
		__rpc_wake_up(task);
	restore_flags(oldflags);

	return task;
}

/*
 * Wake up all tasks on a queue
 */
void
rpc_wake_up(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while (queue->task)
		__rpc_wake_up(queue->task);
	restore_flags(oldflags);
}

/*
 * Wake up all tasks on a queue, and set their status value.
 */
void
rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task	*task;
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while ((task = queue->task) != NULL) {
		task->tk_status = status;
		__rpc_wake_up(task);
	}
	restore_flags(oldflags);
}
/*
 * Run a task at a later time
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	__rpc_wake_up(task);
}
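/*
 * Illustrative sketch (not part of the original file): rpc_delay() parks a
 * task on delay_queue, and __rpc_atrun() clears tk_status and wakes it when
 * the timer fires.  A retry path might back off roughly like this; the
 * function names are hypothetical.
 */
#if 0
static void
example_retry_later(struct rpc_task *task)
{
	task->tk_action = example_retry;	/* next FSM step after the delay */
	rpc_delay(task, HZ);			/* wait about one second */
}
#endif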
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	unsigned long	oldflags;
	int		status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (task->tk_flags & RPC_TASK_CALLBACK) {
			task->tk_flags &= ~RPC_TASK_CALLBACK;
			if (task->tk_callback) {
				task->tk_callback(task);
				task->tk_callback = NULL;
			}
		}

		/*
		 * No handler for next step means exit.
		 */
		if (!task->tk_action)
			break;

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task) && task->tk_action)
			task->tk_action(task);

		/*
		 * Check whether task is sleeping.
		 * Note that if the task may go to sleep in tk_action,
		 * and the RPC reply arrives before we get here, it will
		 * have state RUNNING, but will still be on schedq.
		 */
		save_flags(oldflags); cli();
		if (RPC_IS_RUNNING(task)) {
			if (task->tk_rpcwait == &schedq)
				rpc_remove_wait_queue(task);
		} else while (!RPC_IS_RUNNING(task)) {
			if (RPC_IS_ASYNC(task)) {
				restore_flags(oldflags);
				return 0;
			}

			/* sync task: sleep here */
			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
			if (current->pid == rpciod_pid)
				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
			current->timeout = 0;
			sleep_on(&task->tk_wait);

			/* When the task received a signal, remove from
			 * any queues etc, and make runnable again. */
			if (signalled())
				__rpc_wake_up(task);

			dprintk("RPC: %4d sync task resuming\n",
							task->tk_pid);
		}
		restore_flags(oldflags);

		/*
		 * When a sync task receives a signal, it exits with
		 * -ERESTARTSYS. In order to catch any callbacks that
		 * clean up after sleeping on some queue, we don't
		 * break the loop here, but go around once more.
		 */
		if (!RPC_IS_ASYNC(task) && signalled()) {
			dprintk("RPC: %4d got signal\n", task->tk_pid);
			rpc_exit(task, -ERESTARTSYS);
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	if (task->tk_exit) {
		status = task->tk_status;
		task->tk_exit(task);
	}

	return status;
}
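/*
 * Illustrative sketch (not part of the original file): tk_action is the
 * state-machine hook that __rpc_execute() keeps calling until it becomes
 * NULL.  Each step either sets the next step and returns, or sleeps on a
 * wait queue and lets a later wake-up continue the chain.  The step names
 * are hypothetical.
 */
#if 0
static void
example_step_two(struct rpc_task *task)
{
	task->tk_action = NULL;			/* no next step: task is done */
}

static void
example_step_one(struct rpc_task *task)
{
	task->tk_action = example_step_two;	/* run on the next loop iteration */
}
#endif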
/*
 * User-visible entry point to the scheduler.
 * The recursion protection is for debugging. It should go away once
 * the code has stabilized.
 */
void
rpc_execute(struct rpc_task *task)
{
	static int	executing = 0;
	int		incr = RPC_IS_ASYNC(task)? 1 : 0;

	if (incr) {
		if (rpc_inhibit) {
			printk(KERN_INFO "RPC: execution inhibited!\n");
			return;
		}
		if (executing)
			printk(KERN_WARNING "RPC: %d tasks executed\n", executing);
	}

	executing += incr;
	__rpc_execute(task);
	executing -= incr;
}
/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task	*task;
	int		count = 0;
	unsigned long	oldflags;
	int		need_resched = current->need_resched;

	dprintk("RPC: rpc_schedule enter\n");
	save_flags(oldflags);
	while (1) {
		cli();
		if (!(task = schedq.task))
			break;
		rpc_del_timer(task);
		rpc_remove_wait_queue(task);
		task->tk_flags |= RPC_TASK_RUNNING;
		restore_flags(oldflags);

		__rpc_execute(task);

		if (++count >= 200) {
			count = 0;
			need_resched = 1;
		}
		if (need_resched)
			schedule();
	}
	restore_flags(oldflags);
	dprintk("RPC: rpc_schedule leave\n");
}
/*
 * Allocate memory for RPC purpose.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 *   sync user requests:	GFP_KERNEL
 *   async requests:		GFP_RPC		(== GFP_NFS)
 *   swap requests:		GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		if ((flags & RPC_TASK_SWAPPER) && !swap_buffer_used++) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		current->timeout = jiffies + (HZ >> 4);
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	} while (!signalled());

	return NULL;
}
void
rpc_free(void *buffer)
{
	if (buffer != swap_buffer) {
		kfree(buffer);
		return;
	}
	swap_buffer_used = 0;
}
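/*
 * Illustrative sketch (not part of the original file): rpc_allocate() and
 * rpc_free() must be paired so that the last-ditch swap buffer is marked
 * free again.  The task's flags select the allocation policy described
 * above; the helper name is hypothetical.
 */
#if 0
static int
example_alloc_buffer(struct rpc_task *task, unsigned int size)
{
	task->tk_buffer = rpc_allocate(task->tk_flags, size);
	if (!task->tk_buffer)
		return -ENOMEM;
	/* ... use the buffer; rpc_release_task() later calls rpc_free() ... */
	return 0;
}
#endif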
/*
 * Creation and deletion of RPC task structures
 */
inline void
rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	task->tk_client = clnt;
	task->tk_flags  = RPC_TASK_RUNNING | flags;
	task->tk_exit   = callback;
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	/* Add to global list of all tasks */
	task->tk_next_task = all_tasks;
	task->tk_prev_task = NULL;
	if (all_tasks)
		all_tasks->tk_prev_task = task;
	all_tasks = task;

	if (clnt)
		clnt->cl_users++;

#ifdef RPC_DEBUG
	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;
#endif
	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}
/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			clnt->cl_users, clnt->cl_oneshot);
		clnt->cl_users++;	/* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}
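/*
 * Illustrative sketch (not part of the original file): the usual life cycle
 * is rpc_new_task() (or rpc_init_task() on caller-owned storage) followed
 * by rpc_execute(); a dynamic task is normally released from its tk_exit
 * callback via rpc_release_task(), as rpc_child_exit() does below.  The
 * callback name is hypothetical.
 */
#if 0
static void
example_start_call(struct rpc_clnt *clnt)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, example_call_done, RPC_TASK_ASYNC);
	if (!task)
		return;			/* rpc_new_task already dropped the client */
	rpc_execute(task);		/* hand the task to the scheduler */
}
#endif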
void
rpc_release_task(struct rpc_task *task)
{
	struct rpc_task	*next, *prev;

	dprintk("RPC: %4d release task\n", task->tk_pid);

	/* Remove from global task list */
	prev = task->tk_prev_task;
	next = task->tk_next_task;
	if (next)
		next->tk_prev_task = prev;
	if (prev)
		prev->tk_next_task = next;
	else
		all_tasks = next;

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_cred)
		rpcauth_releasecred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %4d freeing task\n", task->tk_pid);
		task->tk_flags &= ~RPC_TASK_DYNAMIC;
		rpc_free(task);
	}
}
/*
 * Handling of RPC child tasks
 * We can't simply call wake_up(parent) here, because the
 * parent task may already have gone away
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*temp, *parent;

	parent = (struct rpc_task *) child->tk_calldata;
	for (temp = childq.task; temp; temp = temp->tk_next) {
		if (temp == parent)
			return parent;
	}
	return NULL;
}

static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		rpc_wake_up_task(parent);
	}
	rpc_release_task(child);
}
/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}
void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	unsigned long oldflags;

	save_flags(oldflags); cli();
	rpc_make_runnable(child);
	restore_flags(oldflags);
	/* N.B. Is it possible for the child to have already finished? */
	rpc_sleep_on(&childq, task, func, NULL);
}
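/*
 * Illustrative sketch (not part of the original file): a parent task spawns
 * an async child (e.g. a portmapper query), then sleeps on childq until
 * rpc_child_exit() copies the child's status back and wakes it.  The client
 * and function names are hypothetical.
 */
#if 0
static void
example_bind(struct rpc_task *task)
{
	struct rpc_task	*child;

	child = rpc_new_child(example_pmap_clnt, task);
	if (!child)
		return;				/* parent->tk_status is -ENOMEM */
	child->tk_action = example_getport;	/* first FSM step of the child */
	rpc_run_child(task, child, example_bind_done);
}
#endif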
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	**q, *rovr;

	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/* N.B. Why bother to inhibit? Nothing blocks here ... */
	rpc_inhibit++;
	for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	rpc_inhibit--;
}

static struct semaphore rpciod_running = MUTEX_LOCKED;
/*
 * This is the rpciod kernel thread
 */
static int
rpciod(void *ptr)
{
	struct wait_queue **assassin = (struct wait_queue **) ptr;
	unsigned long	oldflags;
	int		rounds = 0;

	MOD_INC_USE_COUNT;
	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	exit_files(current);
	exit_mm(current);

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}
		save_flags(oldflags); cli();
		if (!schedq.task) {
			dprintk("RPC: rpciod back to sleep\n");
			interruptible_sleep_on(&rpciod_idle);
			dprintk("RPC: switch to rpciod\n");
			rpciod_tcp_dispatcher();
			rounds = 0;
		}
		restore_flags(oldflags);
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (all_tasks) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	MOD_DEC_USE_COUNT;
	return 0;
}
static void
rpciod_killall(void)
{
	unsigned long flags;

	while (all_tasks) {
		current->sigpending = 0;
		rpc_killall_tasks(NULL);
		__rpc_schedule();
		if (all_tasks) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			current->state = TASK_INTERRUPTIBLE;
			current->timeout = jiffies + 1;
			schedule();
			current->timeout = 0;
		}
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, &rpciod_killer, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
	return error;
}
void
rpciod_down(void)
{
	unsigned long flags;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	/*
	 * Usually rpciod will exit very quickly, so we
	 * wait briefly before checking the process id.
	 */
	current->sigpending = 0;
	current->state = TASK_INTERRUPTIBLE;
	current->timeout = jiffies + 1;
	schedule();
	current->timeout = 0;
	/*
	 * Display a message if we're going to wait longer.
	 */
	while (rpciod_pid) {
		dprintk("rpciod_down: waiting for pid %d to exit\n", rpciod_pid);
		if (signalled()) {
			dprintk("rpciod_down: caught signal\n");
			break;
		}
		interruptible_sleep_on(&rpciod_killer);
	}
	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
}
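/*
 * Illustrative sketch (not part of the original file): users of the RPC
 * client (an NFS mount, for instance) bracket their lifetime with
 * rpciod_up() and rpciod_down(), so the daemon is started on first use and
 * signalled to exit when the last user goes away.  The function name is
 * hypothetical.
 */
#if 0
static int
example_client_setup(void)
{
	int error;

	error = rpciod_up();		/* start rpciod if not yet running */
	if (error)
		return error;
	/* ... create the client; the matching teardown calls rpciod_down() ... */
	return 0;
}
#endif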
#ifdef RPC_DEBUG
#include <linux/nfs_fs.h>
void rpc_show_tasks(void)
{
	struct rpc_task *t = all_tasks, *next;
	struct nfs_wreq *wreq;

	if (!t)
		return;
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	for (; t; t = next) {
		next = t->tk_next_task;
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid, t->tk_proc, t->tk_flags, t->tk_status,
			t->tk_client, t->tk_client->cl_prog,
			t->tk_rqstp, t->tk_timeout,
			t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
			t->tk_action, t->tk_exit);

		if (!(t->tk_flags & RPC_TASK_NFSWRITE))
			continue;
		/* NFS write requests */
		wreq = (struct nfs_wreq *) t->tk_calldata;
		printk("     NFS: flgs=%08x, pid=%d, pg=%p, off=(%d, %d)\n",
			wreq->wb_flags, wreq->wb_pid, wreq->wb_page,
			wreq->wb_offset, wreq->wb_bytes);
		printk("     name=%s/%s\n",
			wreq->wb_dentry->d_parent->d_name.name,
			wreq->wb_dentry->d_name.name);
	}
}
#endif