/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
#define RPC_PARANOIA 1
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_NONE = -1,	/* uninitialised, choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.mode = SVC_POOL_NONE
};
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
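
/*
 * Illustrative outcomes of the heuristic above (assumed example
 * topologies, not measured): a machine with two online NUMA nodes
 * yields SVC_POOL_PERNODE; a single-node box with four or more
 * online cpus (e.g. a generic x86_64 kernel on a multicore Xeon)
 * yields SVC_POOL_PERCPU; a UP or 2-way box falls through to
 * SVC_POOL_GLOBAL.
 */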
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool || !m->pool_to) {
		kfree(m->to_pool);
		kfree(m->pool_to);
		return -ENOMEM;
	}
	return 0;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_processor_id() + 1;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_node_id() + 1;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Build the global map of cpus to pools and vice versa.
 */
static unsigned int
svc_pool_map_init(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	if (m->mode != SVC_POOL_NONE)
		return m->npools;	/* already initialised */

	m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	return m->npools;
}
/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node;		/* or cpu */

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized and the
	 * map mode is not NONE.
	 */
	BUG_ON(m->mode == SVC_POOL_NONE);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, cpumask_of_cpu(node));
		return 1;
	case SVC_POOL_PERNODE:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, node_to_cpumask(node));
		return 1;
	}
	return 0;
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * SVC_POOL_NONE happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
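
/*
 * Usage sketch (hypothetical caller, for illustration only): a
 * transport's enqueue path would pick the pool for the cpu that is
 * handling the incoming data, keeping the request local to that
 * cpu or node:
 *
 *	struct svc_pool *pool =
 *		svc_pool_for_cpu(serv, smp_processor_id());
 *
 * and then wake an idle thread from pool->sp_threads.
 */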
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	serv->sv_bufsz     = bufsize ? bufsize : 4096;
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/ 1, shutdown);
}
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv),
		svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_init();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_kill_signal = sig;
		serv->sv_module = mod;
	}

	return serv;
}
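
/*
 * Usage sketch (hypothetical, for illustration): a pool-aware service
 * such as knfsd would be created roughly like this; "nfsd_program",
 * "nfsd_last_thread" and "nfsd" stand in for the caller's own program
 * table, shutdown callback and thread function:
 *
 *	serv = svc_create_pooled(&nfsd_program, 64 * 1024,
 *				 nfsd_last_thread, nfsd, SIGKILL,
 *				 THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 *
 * The pool map is initialised exactly once here, so sv_nrpools then
 * reflects the topology chosen by svc_pool_map_choose_mode().
 */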
/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
	struct svc_sock	*svsk;

	dprintk("RPC: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	while (!list_empty(&serv->sv_tempsocks)) {
		svsk = list_entry(serv->sv_tempsocks.next,
				  struct svc_sock, sk_list);
		svc_delete_socket(svsk);
	}
	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	while (!list_empty(&serv->sv_permsocks)) {
		svsk = list_entry(serv->sv_permsocks.next,
				  struct svc_sock, sk_list);
		svc_delete_socket(svsk);
	}

	cache_clean_deferred(serv);

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
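
/*
 * Lifetime note (illustrative): __svc_create() starts the serv with
 * sv_nrthreads == 1 as the creator's reference, and each server thread
 * accounts for one more (see __svc_create_thread below).  Every call
 * to svc_destroy() drops one count; only the final call closes the
 * sockets and frees the serv, so a typical owner pairs one
 * svc_create() with one svc_destroy().
 */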
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	int pages;
	int arghi;

	if (size > RPCSVC_MAXPAYLOAD)
		size = RPCSVC_MAXPAYLOAD;
	pages = 2 + (size + PAGE_SIZE - 1) / PAGE_SIZE;
	rqstp->rq_argused = 0;
	rqstp->rq_resused = 0;
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_argpages[arghi++] = p;
		pages--;
	}
	rqstp->rq_arghi = arghi;
	return !pages;
}
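
/*
 * Worked example of the sizing above (illustrative values): for a
 * requested size of 32768 bytes and PAGE_SIZE == 4096,
 * pages = 2 + (32768 + 4095) / 4096 = 2 + 8 = 10 pages,
 * which must not exceed RPCSVC_MAXPAGES.
 */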
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	while (rqstp->rq_arghi)
		put_page(rqstp->rq_argpages[--rqstp->rq_arghi]);
	while (rqstp->rq_resused) {
		if (rqstp->rq_respages[--rqstp->rq_resused] == NULL)
			continue;
		put_page(rqstp->rq_respages[rqstp->rq_resused]);
	}
	rqstp->rq_argused = 0;
}
/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
		    struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;
	int		have_oldmask = 0;
	cpumask_t	oldmask;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	init_waitqueue_head(&rqstp->rq_wait);

	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(rqstp, serv->sv_bufsz))
		goto out_thread;

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	if (serv->sv_nrpools > 1)
		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

	error = kernel_thread((int (*)(void *)) func, rqstp, 0);

	if (have_oldmask)
		set_cpus_allowed(current, oldmask);

	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
/*
 * Create a thread in the default pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
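
/*
 * Usage sketch (hypothetical, for illustration): a single-pool service
 * created with svc_create() spins up threads one at a time; the error
 * handling shown is the caller's concern, and "my_thread_fn" is a
 * stand-in for the service's own svc_thread_fn:
 *
 *	error = svc_create_thread(my_thread_fn, serv);
 *	if (error < 0)
 *		svc_destroy(serv);
 */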
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next,
				   struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *victim;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		__module_get(serv->sv_module);
		error = __svc_create_thread(serv->sv_function, serv,
					    choose_pool(serv, pool, &state));
		if (error < 0) {
			module_put(serv->sv_module);
			break;
		}
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (victim = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(serv->sv_kill_signal, victim, 1);
		nrservs++;
	}

	return error;
}
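
/*
 * Usage sketch (hypothetical, for illustration): an administrative
 * path such as nfsd's "threads" knob would resize the service with:
 *
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *
 * Passing a NULL pool spreads the change round-robin across all
 * pools; passing &serv->sv_pools[i] confines it to one pool.
 */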
/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program	*progp;
	unsigned long		flags;
	int			i, error = 0, dummy;

	progp = serv->sv_program;

	dprintk("RPC: svc_register(%s, %s, %d)\n",
		progp->pg_name, proto == IPPROTO_UDP ? "udp" : "tcp", port);

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (i = 0; i < progp->pg_nvers; i++) {
		if (progp->pg_vers[i] == NULL)
			continue;
		error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
		if (error < 0)
			break;
		if (port && !dummy) {
			error = -EACCES;
			break;
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}
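
/*
 * Usage sketch (illustrative): a service typically registers each
 * transport after binding its listening sockets, e.g.
 *
 *	error = svc_register(serv, IPPROTO_UDP, port);
 *
 * and clears all registrations with svc_register(serv, 0, 0), as
 * __svc_create() and svc_destroy() already do for stale portmap
 * entries.
 */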
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*accept_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	svc_take_page(rqstp); /* must succeed */
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages+1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_sendfile_ok = 1;
	/* tcp needs a space for the record length... */
	if (rqstp->rq_prot == IPPROTO_TCP)
		svc_putnl(resv, 0);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	accept_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rpc_stat = rpc_garbage_args;
		goto err_bad;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
#ifdef RPC_PARANOIA
	printk("svc: short len %Zd, dropping request\n", argv->iov_len);
#endif
	goto dropit;			/* drop request */

err_bad_dir:
#ifdef RPC_PARANOIA
	printk("svc: bad direction %d, dropping request\n", dir);
#endif
	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, accept_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
#ifdef RPC_PARANOIA
	printk("svc: unknown version (%d)\n", vers);
#endif
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
#ifdef RPC_PARANOIA
	printk("svc: unknown procedure (%d)\n", proc);
#endif
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
#ifdef RPC_PARANOIA
	printk("svc: failed to decode args\n");
#endif
	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
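
/*
 * For reference, the accepted-reply header that svc_process() builds
 * looks like this on the wire (one 32-bit XDR word per row):
 *
 *	xid		copied from the call via svc_putu32()
 *	1		REPLY
 *	0		MSG_ACCEPTED
 *	...		verifier words appended by svc_authenticate()
 *	0		accept status: RPC_SUCCESS, or one of the
 *			RPC_PROG_* / RPC_PROC_* errors above
 *
 * matching the sequence of svc_putnl()/svc_putu32() calls in the
 * function body.
 */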