/*
 * linux/net/sunrpc/clnt.c
 *
 * This file contains the high-level RPC interface.
 * It is modeled as a finite state machine to support both synchronous
 * and asynchronous requests.
 *
 * -	RPC header generation and argument serialization.
 * -	Credential refresh.
 * -	TCP connect handling.
 * -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 * NB: BSD uses a more intelligent approach to guessing when a request
 * or reply has been lost by keeping the RTO estimate for each procedure.
 * We currently make do with a constant timeout value.
 *
 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>

#define RPC_SLACK_SPACE		(1024)	/* total overkill */

# define RPCDBG_FACILITY	RPCDBG_CALL
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
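
/*
 * Typical state progression for a successful call, pieced together from the
 * numbered handlers below (an informal sketch only, not an exhaustive map of
 * every transition):
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind [-> call_bind_status] -> call_connect
 *	  [-> call_connect_status] -> call_transmit -> call_status
 *	  -> call_decode -> rpc_exit_task
 *
 * call_refresh/call_refreshresult and call_timeout are entered only when
 * credentials need refreshing or a retransmit timeout fires.
 */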
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
	static uint32_t clntid;

		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers,
		  rpc_authflavor_t flavor)
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	if (vers >= program->nrvers || !(version = program->version[vers]))

	clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
			clnt->cl_server = buf;
			len = sizeof(clnt->cl_inline_name);
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap     = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);

	auth = rpcauth_create(flavor, clnt);
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);

	rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
/*
 * Create an RPC client
 * @xprt - pointer to xprt struct
 * @servname - name of server
 * @info - rpc_program
 * @version - rpc_program version
 * @authflavor - rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
	struct rpc_clnt *clnt;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
		rpc_shutdown_client(clnt);
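
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * client would typically create a transport, wrap it in an rpc_clnt, and
 * later release it.  The program structure name below is an assumption.
 *
 *	struct rpc_clnt *clnt;
 *
 *	clnt = rpc_create_client(xprt, "example-server", &example_program,
 *				 1, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	...
 *	rpc_shutdown_client(clnt);
 */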
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
rpc_clone_client(struct rpc_clnt *clnt)
	struct rpc_clnt *new;

	new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
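
/*
 * Illustrative sketch (not part of this file): callers such as
 * rpc_bind_new_program() below use cloning to reuse a transport with
 * different parameters, for example:
 *
 *	struct rpc_clnt *new = rpc_clone_client(clnt);
 *
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	new->cl_softrtry = 1;	// hypothetical per-clone tweak
 */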
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
rpc_shutdown_client(struct rpc_clnt *clnt)
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));

	return rpc_destroy_client(clnt);
/*
 * Delete an RPC client
 */
rpc_destroy_client(struct rpc_clnt *clnt)
	if (!atomic_dec_and_test(&clnt->cl_count))
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
/*
 * Release an RPC client
 */
rpc_release_client(struct rpc_clnt *clnt)
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
/*
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old - old rpc_client
 * @program - rpc program to set
 * @vers - rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
	struct rpc_clnt *clnt;
	struct rpc_version *version;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
		rpc_shutdown_client(clnt);
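
/*
 * Usage sketch (illustrative only): the NFS ACL case mentioned above would
 * look roughly like this; the program structure name is an assumption, not
 * something defined in this file.
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_client, &nfsacl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */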
/*
 * Default callback for async RPC calls
 */
rpc_default_callback(struct rpc_task *task, void *data)

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
	unsigned long	sigallow = sigmask(SIGKILL);

	/* Block all signals except those listed in sigallow */
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));

static inline void rpc_restore_sigmask(sigset_t *oldset)
	sigprocmask(SIG_SETMASK, oldset, NULL);

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
	rpc_save_sigmask(oldset, clnt->cl_intr);

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
	rpc_restore_sigmask(oldset);
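
/*
 * Illustrative sketch (not part of this file): synchronous callers that
 * sleep on RPC outside of rpc_call_sync() can bracket the wait with the
 * exported helpers above, e.g.:
 *
 *	sigset_t oldset;
 *
 *	rpc_clnt_sigmask(clnt, &oldset);
 *	// ... issue and wait for the RPC ...
 *	rpc_clnt_sigunmask(clnt, &oldset);
 */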
/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
	struct rpc_task	*task;

	/* If this client is slain all further I/O fails */

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
			status = task->tk_status;
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
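
/*
 * Usage sketch (illustrative only): a typical synchronous call fills in an
 * rpc_message and hands it to rpc_call_sync().  The procedure table entry
 * and argument/result variables below are assumptions.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procedures[EXAMPLEPROC_GETATTR],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int status;
 *
 *	status = rpc_call_sync(clnt, &msg, 0);
 *	if (status < 0)
 *		return status;
 */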
/*
 * New rpc_call implementation
 */
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
	struct rpc_task	*task;

	/* If this client is slain all further I/O fails */

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
		rpc_release_task(task);
	rpc_restore_sigmask(&oldset);
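
/*
 * Usage sketch (illustrative only): asynchronous callers supply an
 * rpc_call_ops table whose rpc_call_done handler runs when the reply has
 * been processed.  The names used here are assumptions.
 *
 *	static void example_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct example_request *req = calldata;
 *		req->status = task->tk_status;
 *	}
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done = example_done,
 *	};
 *
 *	status = rpc_call_async(clnt, &msg, 0, &example_call_ops, req);
 */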
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
		task->tk_action = rpc_exit_task;

rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
	return clnt->cl_xprt->max_payload;
EXPORT_SYMBOL(rpc_max_payload);
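
/*
 * Illustrative sketch (not part of this file): callers can use
 * rpc_max_payload() to clamp their own transfer sizes, e.g.:
 *
 *	size_t max = rpc_max_payload(clnt);
 *
 *	if (xfer_size > max)
 *		xfer_size = max;
 */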
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
rpc_restart_call(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))

	task->tk_action = call_start;
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
call_start(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
/*
 * 1.	Reserve an RPC call slot
 */
call_reserve(struct rpc_task *task)
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;

	task->tk_action = call_reserveresult;
/*
 * 1b.	Grok the result of xprt_reserve()
 */
call_reserveresult(struct rpc_task *task)
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);

	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
	case -EIO:	/* probably a shutdown */
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
	rpc_exit(task, status);
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
call_allocate(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;

	/* FIXME: compute buffer requirements more exactly using
	 */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);

	rpc_exit(task, -ERESTARTSYS);
rpc_task_need_encode(struct rpc_task *task)
	return task->tk_rqstp->rq_snd_buf.len == 0;

rpc_task_force_reencode(struct rpc_task *task)
	task->tk_rqstp->rq_snd_buf.len = 0;
/*
 * 3.	Encode arguments of an RPC call
 */
call_encode(struct rpc_task *task)
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
/*
 * 4.	Get the server port number if not yet set
 */
call_bind(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
/*
 * 4a.	Sort out bind result
 */
call_bind_status(struct rpc_task *task)
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_action = call_connect;

	switch (task->tk_status) {
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
		rpc_delay(task, 3*HZ);
		dprintk("RPC: %4d rpcbind request timed out\n",
		if (RPC_IS_SOFT(task)) {
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);

	rpc_exit(task, status);

	task->tk_action = call_bind;
/*
 * 4b.	Connect to the RPC server
 */
call_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
/*
 * 4c.	Sort out connect result
 */
call_connect_status(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;

	/* Something failed: remote service port may have changed */
	if (clnt->cl_autobind)

		task->tk_action = call_bind;
		rpc_exit(task, -EIO);
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
call_transmit(struct rpc_task *task)
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
	task->tk_action = call_transmit_status;
	if (task->tk_status < 0)
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);

	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
/*
 * 6.	Sort out the RPC call status
 */
call_status(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
		task->tk_action = call_decode;
		task->tk_action = call_timeout;
		if (clnt->cl_autobind)
		task->tk_action = call_bind;
		task->tk_action = call_transmit;
		/* shutdown or soft timeout */
		rpc_exit(task, status);
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
/*
 * 6a.	Handle transmission errors.
 */
call_transmit_status(struct rpc_task *task)
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
/*
 * 6b.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
call_timeout(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_autobind)

	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
/*
 * 7.	Decode the RPC reply
 */
call_decode(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (p == ERR_PTR(-EAGAIN))

	task->tk_action = rpc_exit_task;

		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,

	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
/*
 * 8.	Refresh the credentials if rejected by the server
 */
call_refresh(struct rpc_task *task)
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
/*
 * 8a.	Process the results of a credential refresh
 */
call_refreshresult(struct rpc_task *task)
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
/*
 * Call header serialization
 */
call_header(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
/*
 * Reply header verification
 */
call_verify(struct rpc_task *task)
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		switch ((n = ntohl(*p++))) {
			case RPC_AUTH_ERROR:
				dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
				error = -EPROTONOSUPPORT;
				dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
			task->tk_action = call_bind;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
		dprintk("RPC: %4d call_verify: call rejected %d\n",
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	len = p - (u32 *)iov->iov_base - 1;
	switch ((n = ntohl(*p++))) {
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);

	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return ERR_PTR(-EAGAIN);
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);

	rpc_exit(task, error);
	return ERR_PTR(error);

	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,

int rpc_ping(struct rpc_clnt *clnt, int flags)
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,

	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);