/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
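
/*
 * Illustrative sketch of the two arrays: on a two-node NUMA machine in
 * SVC_POOL_PERNODE mode they are inverse maps,
 *
 *	to_pool[0] == 0, to_pool[1] == 1	(node -> pool id)
 *	pool_to[0] == 0, pool_to[1] == 1	(pool id -> node)
 *
 * so an incoming packet's node selects its pool, and a pool's threads
 * can be bound back to that node.
 */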
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
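
/*
 * Usage note: registering with mode 0644 exposes the parameter both as a
 * module option and via sysfs, so the pool mode can be chosen with
 *
 *	modprobe sunrpc pool_mode=pernode
 * or
 *	echo percpu > /sys/module/sunrpc/parameters/pool_mode
 *
 * before any pooled service is running; once svc_pool_map.count is
 * nonzero, param_set_pool_mode() above rejects changes with -EBUSY.
 */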
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
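
/*
 * Illustrative caller sketch (the real caller lives in the transport
 * code, not in this file; xprt here is a hypothetical struct svc_xprt):
 * the receive path picks the pool for the CPU handling the packet,
 * roughly
 *
 *	pool = svc_pool_for_cpu(xprt->xpt_server, get_cpu());
 */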
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);
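
/*
 * Illustrative use, with my_program/my_bufsize/my_shutdown standing in
 * for a real service's definitions: a simple single-pool service is
 * created with
 *
 *	serv = svc_create(&my_program, my_bufsize, my_shutdown);
 *	if (!serv)
 *		return -ENOMEM;
 */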
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
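
/*
 * Illustrative use (names hypothetical): a pooled service also passes
 * the thread function and owning module, so svc_set_num_threads() can
 * later spawn and refcount server threads:
 *
 *	serv = svc_create_pooled(&my_program, my_bufsize, my_shutdown,
 *				 my_thread_fn, THIS_MODULE);
 *
 * where my_thread_fn is a svc_thread_fn that loops calling svc_recv()
 * and svc_process() until it is signalled to exit.
 */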
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

#if defined(CONFIG_NFS_V4_1)
	svc_sock_destroy(serv->bc_xprt);
#endif /* CONFIG_NFS_V4_1 */

	svc_unregister(serv);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
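
/*
 * Worked example: svc_prepare_thread() passes serv->sv_max_mesg here, so
 * a service sized for 32K payloads (sv_max_mesg = 36864 on a 4K-page
 * machine) gets 36864 / 4096 + 1 = 10 pages, the "+ 1" covering the
 * extra page shared between request and reply.
 */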
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
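
/*
 * Illustrative use: an nfsd-style caller takes a reference, resizes the
 * whole service round-robin across pools, then drops the reference:
 *
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);
 *
 * Passing a specific pool instead of NULL resizes only that pool.
 */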
/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				(const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, protocol, port);

	return error;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				(const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(program, version,
						protocol, port);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
		 const unsigned short proto, const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP?  "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
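
/*
 * Example flow: registering NFSv3 (program 100003) over TCP in an
 * IPv4-only configuration results in an rpcbind v4 SET for netid "tcp";
 * if the local rpcbind only speaks version 2, __svc_rpcb_register4()
 * above falls back to an equivalent PMAP_SET.
 */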
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			__svc_unregister(progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int	r;
	char	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	progp = serv->sv_program;

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32 dir;
	int error;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir  = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	error = svc_process_common(rqstp, argv, resv);
	if (error <= 0)
		return error;

	return svc_send(rqstp);
}
#if defined(CONFIG_NFS_V4_1)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	int		error;

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	error = svc_process_common(rqstp, argv, resv);
	if (error <= 0)
		return error;

	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	return bc_send(req);
}
EXPORT_SYMBOL(bc_svc_process);
#endif /* CONFIG_NFS_V4_1 */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
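
/*
 * Example: the result is the smaller of the transport's and the server's
 * limits, so a request arriving over UDP is capped at the UDP transport
 * class's xcl_max_payload (32K in mainline) even if sv_max_payload was
 * sized for 1MB TCP requests.
 */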