/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
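/*
 * Rough overview of the state machine, summarized from the tk_action
 * assignments in the handlers below (common success path only; credential
 * refresh, rebind and timeout paths loop back into this chain):
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind [-> call_bind_status] -> call_connect
 *	  [-> call_connect_status] -> call_transmit (encodes via call_encode)
 *	  -> call_status -> call_decode
 *
 * call_refresh/call_refreshresult handle credential refresh and re-enter at
 * call_reserve; call_timeout handles major timeouts and re-enters at
 * call_bind.
 */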
#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>
#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
	static unsigned int clntid;

	clnt->__cl_parent_dentry = rpc_mkdir(NULL, dir_name, NULL);
	if (IS_ERR(clnt->__cl_parent_dentry)) {
		error = PTR_ERR(clnt->__cl_parent_dentry);
		goto retry_parent; /* XXX(hch): WTF? */
		printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",

	snprintf(name, sizeof(name), "clnt%x", clntid++);
	name[sizeof(name) - 1] = '\0';

	clnt->cl_dentry = rpc_mkdir(clnt->__cl_parent_dentry, name, clnt);
	if (IS_ERR(clnt->cl_dentry)) {
		error = PTR_ERR(clnt->cl_dentry);
		printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
		rpc_rmdir(clnt->__cl_parent_dentry);
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	if (vers >= program->nrvers || !(version = program->version[vers]))

	clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		clnt->cl_server = buf;
		len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap     = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);

	auth = rpcauth_create(flavor, clnt);
	printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);

	rpc_rmdir(clnt->cl_dentry);
	rpc_rmdir(clnt->__cl_parent_dentry);

	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
/**
 * Create an RPC client
 * @xprt - pointer to xprt struct
 * @servname - name of server
 * @info - rpc_program
 * @version - rpc_program version
 * @authflavor - rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
	struct rpc_clnt *clnt;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	rpc_shutdown_client(clnt);
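/*
 * Illustrative sketch: one way a caller might use rpc_create_client().
 * "example_program", "EXAMPLE_VERSION" and the server name are placeholder
 * assumptions for illustration only; the transport is assumed to have been
 * created beforehand.
 */
#if 0
static struct rpc_clnt *example_setup(struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt;

	/* Pings the server synchronously, so never call this from rpciod */
	clnt = rpc_create_client(xprt, "example-server", &example_program,
				 EXAMPLE_VERSION, RPC_AUTH_UNIX);
	if (IS_ERR(clnt))
		return clnt;	/* failure is ERR_PTR()-encoded */
	return clnt;
}
#endif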
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
rpc_clone_client(struct rpc_clnt *clnt)
	struct rpc_clnt *new;

	new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");

	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
rpc_shutdown_client(struct rpc_clnt *clnt)
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));

	return rpc_destroy_client(clnt);
/*
 * Delete an RPC client
 */
rpc_destroy_client(struct rpc_clnt *clnt)
	if (!atomic_dec_and_test(&clnt->cl_count))

	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	rpcauth_destroy(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);

	rpc_rmdir(clnt->cl_dentry);
	if (clnt->__cl_parent_dentry)
		rpc_rmdir(clnt->__cl_parent_dentry);
	xprt_destroy(clnt->cl_xprt);
	clnt->cl_xprt = NULL;
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
/*
 * Release an RPC client
 */
rpc_release_client(struct rpc_clnt *clnt)
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))

	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old - old rpc_client
 * @program - rpc program to set
 * @vers - rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
	struct rpc_clnt *clnt;
	struct rpc_version *version;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	rpc_shutdown_client(clnt);
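/*
 * Illustrative sketch: binding a second program, such as an NFS ACL
 * sideband protocol, to an existing client so that both share one
 * transport.  "nfsacl_program" and "ACL_VERSION" are placeholder
 * assumptions for illustration only.
 */
#if 0
static struct rpc_clnt *example_bind_acl(struct rpc_clnt *nfs_client)
{
	struct rpc_clnt *acl_client;

	/* Clones nfs_client, swaps in the new program, then pings it */
	acl_client = rpc_bind_new_program(nfs_client, &nfsacl_program,
					  ACL_VERSION);
	return acl_client;	/* ERR_PTR()-encoded on failure */
}
#endif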
/*
 * Default callback for async RPC calls
 */
rpc_default_callback(struct rpc_task *task)
/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
	unsigned long sigallow = 0;

	/* Block all signals except those listed in sigallow */
	sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));

static inline void rpc_restore_sigmask(sigset_t *oldset)
	sigprocmask(SIG_SETMASK, oldset, NULL);

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
	rpc_save_sigmask(oldset, clnt->cl_intr);

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
	rpc_restore_sigmask(oldset);
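/*
 * Illustrative sketch: how synchronous callers outside this file can
 * bracket code that sleeps on RPC with the exported mask/unmask pair.
 * "example_blocking_op" is a placeholder assumption for illustration only.
 */
#if 0
static int example_interruptible_wait(struct rpc_clnt *clnt)
{
	sigset_t oldset;
	int err;

	rpc_clnt_sigmask(clnt, &oldset);	/* honours clnt->cl_intr */
	err = example_blocking_op(clnt);
	rpc_clnt_sigunmask(clnt, &oldset);
	return err;
}
#endif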
/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
	struct rpc_task *task;

	/* If this client is slain all further I/O fails */

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_new_task(clnt, NULL, flags);

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0) {
		status = rpc_execute(task);
	} else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_restore_sigmask(&oldset);
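/*
 * Illustrative sketch: a typical synchronous call.  "example_procedures",
 * "EXAMPLEPROC_GETATTR" and the argument/result structures are placeholder
 * assumptions for illustration only.
 */
#if 0
static int example_sync_call(struct rpc_clnt *clnt,
			     struct example_args *args,
			     struct example_res *res)
{
	struct rpc_message msg = {
		.rpc_proc = &example_procedures[EXAMPLEPROC_GETATTR],
		.rpc_argp = args,
		.rpc_resp = res,
	};

	/* Blocks until the reply is decoded (or the call fails/times out) */
	return rpc_call_sync(clnt, &msg, 0);
}
#endif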
/*
 * New rpc_call implementation
 */
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
	struct rpc_task *task;

	/* If this client is slain all further I/O fails */

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	if (!(task = rpc_new_task(clnt, callback, flags)))

	task->tk_calldata = data;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;

	rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
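/*
 * Illustrative sketch: issuing a call without blocking, with completion
 * handled by an rpc_action callback.  "example_done", "example_procedures"
 * and "EXAMPLEPROC_WRITE" are placeholder assumptions for illustration only.
 */
#if 0
static void example_done(struct rpc_task *task)
{
	/* Runs once the call completes; private data was stashed below */
	kfree(task->tk_calldata);
}

static int example_async_call(struct rpc_clnt *clnt, void *calldata)
{
	struct rpc_message msg = {
		.rpc_proc = &example_procedures[EXAMPLEPROC_WRITE],
	};

	return rpc_call_async(clnt, &msg, 0, example_done, calldata);
}
#endif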
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = NULL;
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
	struct rpc_xprt *xprt = clnt->cl_xprt;

	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
rpc_restart_call(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))

	task->tk_action = call_start;
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
call_start(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
/*
 * 1. Reserve an RPC call slot
 */
call_reserve(struct rpc_task *task)
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;

	task->tk_action = call_reserveresult;
/*
 * 1b. Grok the result of xprt_reserve()
 */
call_reserveresult(struct rpc_task *task)
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
			task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	if (task->tk_rqstp) {
		task->tk_action = call_allocate;

	printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
			__FUNCTION__, status);
	rpc_exit(task, -EIO);

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);

	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
	case -EIO:	/* probably a shutdown */

	printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
			__FUNCTION__, status);
	rpc_exit(task, status);
/*
 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
 *    (Note: buffer memory is freed in rpc_task_release).
 */
call_allocate(struct rpc_task *task)
	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	task->tk_action = call_bind;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)

	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);

	rpc_exit(task, -ERESTARTSYS);
/*
 * 3. Encode arguments of an RPC call
 */
call_encode(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;

	dprintk("RPC: %4d call_encode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len = bufsiz;
	sndbuf->tail[0].iov_len = 0;
	sndbuf->page_len = 0;
	sndbuf->buflen = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len = bufsiz;
	rcvbuf->tail[0].iov_len = 0;
	rcvbuf->page_len = 0;
	rcvbuf->buflen = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);

	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
					task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
/*
 * 4. Get the server port number if not yet set
 */
call_bind(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
/*
 * 4a. Sort out bind result
 */
call_bind_status(struct rpc_task *task)
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
				task->tk_pid, task->tk_status);
		task->tk_action = call_connect;

	switch (task->tk_status) {
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",

		dprintk("RPC: %4d rpcbind request timed out\n",
		if (RPC_IS_SOFT(task)) {

		dprintk("RPC: %4d remote rpcbind service unavailable\n",
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",

		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);

	rpc_exit(task, status);

	task->tk_action = call_bind;
/*
 * 4b. Connect to the RPC server
 */
call_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
/*
 * 4c. Sort out connect result
 */
call_connect_status(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
			task->tk_pid, task->tk_status);

	clnt->cl_stats->netreconn++;
	task->tk_action = call_transmit;

	/* Something failed: remote service port may have changed */
	if (clnt->cl_autobind)

	task->tk_action = call_bind;

	rpc_exit(task, -EIO);
/*
 * 5. Transmit the RPC request, and wait for reply
 */
call_transmit(struct rpc_task *task)
	dprintk("RPC: %4d call_transmit (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)

	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)

	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (!task->tk_rqstp->rq_bytes_sent)

	if (task->tk_status < 0)

	if (task->tk_status < 0)

	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
/*
 * 6. Sort out the RPC call status
 */
call_status(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
			task->tk_pid, task->tk_status);

	status = task->tk_status;

	task->tk_action = call_decode;

	task->tk_action = call_timeout;

	req->rq_bytes_sent = 0;
	if (clnt->cl_autobind)

	task->tk_action = call_bind;

	task->tk_action = call_transmit;

	/* shutdown or soft timeout */
	rpc_exit(task, status);

	printk("%s: RPC call returned error %d\n",
			clnt->cl_protname, -status);
	rpc_exit(task, status);
/*
 * 6a. Handle RPC timeout
 *     We do not release the request slot, so we keep using the
 *     same XID for all retransmits.
 */
call_timeout(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_autobind)

	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
/*
 * 7. Decode the RPC reply
 */
call_decode(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;

	dprintk("RPC: %4d call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;

		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	if (!(p = call_verify(task))) {
		if (task->tk_action == NULL)

	task->tk_action = NULL;

	task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
					      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,

	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
/*
 * 8. Refresh the credentials if rejected by the server
 */
call_refresh(struct rpc_task *task)
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
/*
 * 8a. Process the results of a credential refresh
 */
call_refreshresult(struct rpc_task *task)
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))

	if (status == -EACCES) {
		rpc_exit(task, -EACCES);

	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
/*
 * Call header serialization
 */
call_header(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 *p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
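/*
 * For reference, the call message emitted above follows the RFC 1831
 * layout: after any transport-specific header come
 *
 *	xid, msg type (CALL), RPC version (2), program, version, procedure,
 *	credential (flavor, length, body), verifier (flavor, length, body)
 *
 * call_header() writes everything up to and including the procedure word;
 * rpcauth_marshcred() appends the credential and verifier.
 */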
/*
 * Reply header verification
 */
call_verify(struct rpc_task *task)
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32 *p = iov->iov_base, n;
	int error = -EACCES;

	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);

	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:

			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;

			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);

		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)

			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)

			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
			task->tk_action = call_bind;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");

			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);

		dprintk("RPC: %4d call_verify: call rejected %d\n",

	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_retry;		/* bad verifier, retry */

	len = p - (u32 *)iov->iov_base - 1;

	switch ((n = ntohl(*p++))) {
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);

		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);

	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;

	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);

	rpc_exit(task, error);

	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
int rpc_ping(struct rpc_clnt *clnt, int flags)
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};

	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);