/*
 *  linux/net/sunrpc/rpcclnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
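
/*
 * Typical path of a request through the handlers declared above:
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind [-> call_connect -> call_connect_status]
 *	  -> call_transmit -> call_status -> call_decode
 *
 * call_refresh/call_refreshresult are entered when credentials must be
 * (re)established, and call_timeout when a reply does not arrive in time.
 */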

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers,
		  rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_err;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	err = -ENOMEM;
	if (!rpcauth_create(flavor, clnt)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	return ERR_PTR(err);
}
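
/*
 * Typical usage (illustrative sketch, not part of this file): callers
 * normally create a transport first and then attach a client to it.
 * The xprt_create_proto() setup and the sample program/timeout values
 * below are assumptions for the example only.
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeout);
 *	if (!IS_ERR(xprt))
 *		clnt = rpc_create_client(xprt, hostname, &my_program,
 *					 my_version, RPC_AUTH_UNIX);
 */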

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
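
/*
 * Note on client lifetime: cl_count is the reference count on the rpc_clnt
 * structure itself (dropped by rpc_destroy_client), while cl_users counts
 * the tasks currently using the client (dropped by rpc_release_client).
 * rpc_shutdown_client() above kills outstanding tasks and waits for
 * cl_users to drain before the structure is finally destroyed.
 */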

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sighand->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	irqflags;

	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}
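
/*
 * These two helpers are used in pairs around code that sleeps on an RPC
 * call (see rpc_call_sync/rpc_call_async below): block everything except
 * SIGKILL, plus SIGINT/SIGQUIT when the client allows interruption
 * (cl_intr), run the call, then restore the caller's signal mask.
 */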

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	rpc_clnt_sigmask(clnt, &oldset);
	task = rpc_new_task(clnt, NULL, flags);
	if (task == NULL)
		goto out;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}
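
/*
 * Typical synchronous usage (illustrative sketch only; the procedure table
 * entry and the argument/result structures below are placeholders, not
 * definitions from this file):
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &my_procedures[MY_PROC_FOO],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 *
 * A negative return value is an errno-style error from the transport,
 * the server, or the decode routine.
 */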

/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = NULL;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	xprt->sndsize = 0;
	if (sndsize)
		xprt->sndsize = sndsize + RPC_SLACK_SPACE;
	xprt->rcvsize = 0;
	if (rcvsize)
		xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
	if (xprt_connected(xprt))
		xprt_sock_setbufsize(xprt);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
						 task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;

	if (!clnt->cl_port) {
		task->tk_action = call_connect;
		task->tk_timeout = RPC_CONNECT_TIMEOUT;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a.	Connect to the RPC server (TCP case)
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_connect status %d\n",
				task->tk_pid, task->tk_status);

	if (xprt_connected(clnt->cl_xprt)) {
		task->tk_action = call_transmit;
		return;
	}
	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	xprt_connect(task);
}

/*
 * 4b.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: we may have to rebind */
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
		break;
	default:
		rpc_exit(task, -EIO);
	}
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (!task->tk_rqstp->rq_bytes_sent)
		call_encode(task);
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind)
			clnt->cl_port = 0;
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	if (!(p = call_verify(task))) {
		if (task->tk_action == NULL)
			return;
		goto out_retry;
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_xprt *xprt = clnt->cl_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;				/* fill in later */
	*p++ = req->rq_xid;				/* XID */
	*p++ = htonl(RPC_CALL);				/* CALL */
	*p++ = htonl(RPC_VERSION);			/* RPC version */
	*p++ = htonl(clnt->cl_prog);			/* program number */
	*p++ = htonl(clnt->cl_vers);			/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	return rpcauth_marshcred(task, p);
}
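
/*
 * On the wire, the buffer filled in above is an RFC 1831 call header:
 * [record marker for stream transports only,] xid, message type (CALL),
 * RPC version (2), program, version and procedure numbers, followed by
 * the credential and verifier appended by rpcauth_marshcred().
 */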

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_retry;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			printk(KERN_WARNING "%s: RPC call version mismatch!\n", __FUNCTION__);
			goto out_eio;
		default:
			printk(KERN_WARNING "%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_retry;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_PROG_MISMATCH:
		printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_PROC_UNAVAIL:
		printk(KERN_WARNING "RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_retry:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk(KERN_WARNING "RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return NULL;
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return NULL;
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_retry;
}