/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 *	by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_NONE = -1,	/* uninitialised, choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.mode = SVC_POOL_NONE
};
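
/*
 * For example, in SVC_POOL_PERNODE mode on a machine whose online NUMA
 * nodes 0 and 1 both have cpus, the init code below ends up with
 *
 *	npools	  = 2
 *	to_pool[] = { 0, 1 }	(node -> pool id)
 *	pool_to[] = { 0, 1 }	(pool id -> node)
 *
 * i.e. the two arrays are inverse mappings of each other.
 */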
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 *
		 * However, unless IRQs round-robin nicely across
		 * the cpus this turns out to be really bad, so
		 * just go GLOBAL for now until a better fix can
		 * be developed.
		 */
		return SVC_POOL_GLOBAL;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_processor_id() + 1;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_node_id() + 1;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Build the global map of cpus to pools and vice versa.
 */
static unsigned int
svc_pool_map_init(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	if (m->mode != SVC_POOL_NONE)
		return m->npools;

	m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	return m->npools;
}
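
/*
 * Net effect of the heuristic above: a multi-node NUMA machine gets
 * SVC_POOL_PERNODE with one pool per node that has cpus; anything else
 * (including an allocation failure) falls back to a single global pool.
 */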
/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node;	/* or cpu */

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized and the
	 * map mode is not NONE.
	 */
	BUG_ON(m->mode == SVC_POOL_NONE);

	switch (m->mode) {
	default:
		return 0;
	case SVC_POOL_PERCPU:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, cpumask_of_cpu(node));
		return 1;
	case SVC_POOL_PERNODE:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, node_to_cpumask(node));
		return 1;
	}
}
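
/*
 * For example, a thread serving pool 3 gets pinned to the single cpu
 * pool_to[3] in SVC_POOL_PERCPU mode, or may run on any cpu of node
 * pool_to[3] in SVC_POOL_PERNODE mode; in SVC_POOL_GLOBAL mode (the
 * default case above) its cpumask is left untouched.
 */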
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * SVC_POOL_NONE happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
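
/*
 * Note that the "% serv->sv_nrpools" keeps the index valid even when the
 * global map is per-cpu or per-node but this particular serv was created
 * single-pooled via svc_create(); everything then maps back to pool 0.
 */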
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_init();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_kill_signal = sig;
		serv->sv_module = mod;
	}

	return serv;
}
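
/*
 * Rough usage sketch (names follow the in-tree nfsd caller and are only
 * illustrative here): a pooled service is created once and then sized
 * with svc_set_num_threads(), e.g.
 *
 *	serv = svc_create_pooled(&nfsd_program, bufsize, nfsd_last_thread,
 *				 nfsd, SIGINT, THIS_MODULE);
 *	if (serv)
 *		error = svc_set_num_threads(serv, NULL, nrservs);
 */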
/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
	struct svc_sock	*svsk;
	struct svc_sock	*tmp;

	dprintk("RPC: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list)
		svc_force_close_socket(svsk);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list)
		svc_force_close_socket(svsk);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	int pages;
	int arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume one of them is
				       * at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return !pages;
}
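
/*
 * For example, with 4KB pages and a 1MB maximum payload, __svc_create()
 * sets sv_max_mesg to 1MB + 4KB, so this loop allocates
 * (1MB + 4KB)/4KB + 1 = 258 pages for the combined argument/reply buffer.
 */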
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
		    struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;
	int		have_oldmask = 0;
	cpumask_t	oldmask;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	init_waitqueue_head(&rqstp->rq_wait);

	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	if (serv->sv_nrpools > 1)
		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

	error = kernel_thread((int (*)(void *)) func, rqstp, 0);

	if (have_oldmask)
		set_cpus_allowed(current, oldmask);

	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
/*
 * Create a thread in the default pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to bring the number
 * of threads to the given value.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *victim;
	int error = 0;
	unsigned int state = serv->sv_nrthreads - 1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads - 1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		__module_get(serv->sv_module);
		error = __svc_create_thread(serv->sv_function, serv,
					    choose_pool(serv, pool, &state));
		if (error < 0) {
			module_put(serv->sv_module);
			break;
		}
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (victim = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(serv->sv_kill_signal, victim, 1);
		nrservs++;
	}

	return error;
}
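
/*
 * For example, a request to run N nfsd threads ends up here roughly as
 * svc_set_num_threads(serv, NULL, N): the existing threads are counted
 * across all pools, missing ones are created round-robin via
 * choose_pool(), and surplus ones are sent sv_kill_signal via
 * choose_victim().  (The nfsd usage is illustrative; any pooled service
 * may call this.)
 */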
/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program	*progp;
	unsigned long		flags;
	int			i, error = 0, dummy;

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
				progp->pg_name,
				proto == IPPROTO_UDP ? "udp" : "tcp",
				port,
				i,
				progp->pg_vers[i]->vs_hidden ?
					" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
			if (error < 0)
				break;
			if (port && !dummy) {
				error = -EACCES;
				break;
			}
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;
	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_sendfile_ok = 1;
	/* tcp needs a space for the record length... */
	if (rqstp->rq_prot == IPPROTO_TCP)
		svc_putnl(resv, 0);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */
	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rpc_stat = rpc_garbage_args;
		goto err_bad;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}
	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	    !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve(rqstp, procp->pc_xdrressize<<2);
	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void *)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;
 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;
err_short_len:
	if (net_ratelimit())
		printk("svc: short len %Zd, dropping request\n", argv->iov_len);

	goto dropit;			/* drop request */

err_bad_dir:
	if (net_ratelimit())
		printk("svc: bad direction %d, dropping request\n", dir);

	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	if (net_ratelimit())
		printk("svc: unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	if (net_ratelimit())
		printk("svc: unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	if (net_ratelimit())
		printk("svc: failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
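
/*
 * A service thread typically sits in a loop of roughly this shape
 * (the nfsd and lockd threads do something very similar):
 *
 *	for (;;) {
 *		err = svc_recv(rqstp, timeout);
 *		if (err == -EINTR || err == -EAGAIN)
 *			continue;	(or break, as the service prefers)
 *		svc_process(rqstp);
 *	}
 *
 * svc_process() either sends the reply via svc_send() or drops the
 * request.
 */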
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	int max = RPCSVC_MAXPAYLOAD_TCP;

	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
		max = RPCSVC_MAXPAYLOAD_UDP;
	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);