/*
 *  linux/net/sunrpc/rpcclnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -   RPC header generation and argument serialization.
 *  -   Credential refresh.
 *  -   TCP reconnect handling (when finished).
 *  -   Retry of operation when it is suspected the operation failed because
 *      of uid squashing on the server, or when the credentials were stale
 *      and need to be refreshed, or when a packet was damaged in transit.
 *      This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <asm/system.h>
#include <asm/segment.h>

#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/sunrpc/clnt.h>

#define RPC_SLACK_SPACE         1024    /* total overkill */

# define RPCDBG_FACILITY        RPCDBG_CALL
static struct wait_queue *      destroy_wait = NULL;

static void     call_bind(struct rpc_task *task);
static void     call_reserve(struct rpc_task *task);
static void     call_reserveresult(struct rpc_task *task);
static void     call_allocate(struct rpc_task *task);
static void     call_encode(struct rpc_task *task);
static void     call_decode(struct rpc_task *task);
static void     call_transmit(struct rpc_task *task);
static void     call_receive(struct rpc_task *task);
static void     call_status(struct rpc_task *task);
static void     call_refresh(struct rpc_task *task);
static void     call_refreshresult(struct rpc_task *task);
static void     call_timeout(struct rpc_task *task);
static void     call_reconnect(struct rpc_task *task);
static u32 *    call_header(struct rpc_task *task);
static u32 *    call_verify(struct rpc_task *task);
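
/*
 * The normal call sequence, as driven by the tk_action transitions
 * set up in the functions below, is:
 *
 *      call_bind -> call_reserve -> call_reserveresult -> call_allocate
 *      -> call_encode -> call_transmit -> call_receive -> call_status
 *      -> call_decode
 *
 * call_refresh/call_refreshresult, call_timeout and call_reconnect
 * cover the side paths: stale credentials, retransmission after a
 * timeout, and re-establishing a lost TCP connection.
 */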
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
                  struct rpc_program *program, u32 vers, int flavor)
{
        struct rpc_version      *version;
        struct rpc_clnt         *clnt = NULL;

        dprintk("RPC: creating %s client for %s (xprt %p)\n",
                program->name, servname, xprt);

        if (vers >= program->nrvers || !(version = program->version[vers]))
                goto out;

        clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
        if (!clnt)
                goto out_no_clnt;
        memset(clnt, 0, sizeof(*clnt));

        clnt->cl_xprt     = xprt;
        clnt->cl_procinfo = version->procs;
        clnt->cl_maxproc  = version->nrprocs;
        clnt->cl_server   = servname;
        clnt->cl_protname = program->name;
        clnt->cl_port     = xprt->addr.sin_port;
        clnt->cl_prog     = program->number;
        clnt->cl_vers     = version->number;
        clnt->cl_prot     = IPPROTO_UDP;
        clnt->cl_stats    = program->stats;
        clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");

        if (!clnt->cl_port)
                clnt->cl_autobind = 1;

        if (!rpcauth_create(flavor, clnt))
                goto out_no_auth;

out:
        return clnt;

out_no_clnt:
        printk("RPC: out of memory in rpc_create_client\n");
        goto out;
out_no_auth:
        printk("RPC: Couldn't create auth handle (flavor %d)\n",
                flavor);
        rpc_free(clnt);
        clnt = NULL;
        goto out;
}
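
/*
 * Typical usage (a sketch, not part of this file): a client of this
 * interface first creates a transport and then wraps it in a client
 * handle.  xprt_create_proto() is assumed from net/sunrpc/xprt.c of
 * the same vintage; "addr", "hostname", "my_program" and "vers" are
 * placeholders.
 *
 *      xprt = xprt_create_proto(IPPROTO_UDP, &addr, NULL);
 *      clnt = rpc_create_client(xprt, hostname, &my_program, vers,
 *                               RPC_AUTH_UNIX);
 *      ...
 *      rpc_shutdown_client(clnt);
 */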
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: shutting down %s client for %s\n",
                clnt->cl_protname, clnt->cl_server);
        while (clnt->cl_users) {
                printk("rpc_shutdown_client: client %s, tasks=%d\n",
                        clnt->cl_protname, clnt->cl_users);
                /* Don't let rpc_release_client destroy us */
                clnt->cl_oneshot = 0;
                clnt->cl_dead = 0;
                rpc_killall_tasks(clnt);
                sleep_on(&destroy_wait);
        }
        return rpc_destroy_client(clnt);
}
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: destroying %s client for %s\n",
                clnt->cl_protname, clnt->cl_server);

        if (clnt->cl_auth) {
                rpcauth_destroy(clnt->cl_auth);
                clnt->cl_auth = NULL;
        }
        if (clnt->cl_xprt) {
                xprt_destroy(clnt->cl_xprt);
                clnt->cl_xprt = NULL;
        }
        rpc_free(clnt);
        return 0;
}
/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: rpc_release_client(%p, %d)\n",
                clnt, clnt->cl_users);
        if (clnt->cl_users) {
                if (--(clnt->cl_users) > 0)
                        return;
        } else
                printk("rpc_release_client: %s client already free??\n",
                        clnt->cl_protname);

        wake_up(&destroy_wait);
        if (clnt->cl_oneshot || clnt->cl_dead)
                rpc_destroy_client(clnt);
}
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
        rpc_release_task(task);
}
/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long   sigallow = sigmask(SIGKILL);
        unsigned long   irqflags;

        /* Turn off various signals */
        if (clnt->cl_intr) {
                struct k_sigaction *action = current->sig->action;
                if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGINT);
                if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGQUIT);
        }
        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        *oldset = current->blocked;
        siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long   irqflags;

        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        current->blocked = *oldset;
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
/*
 * New rpc_call implementation
 */
int
rpc_do_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp,
                int flags, rpc_action func, void *data)
{
        struct rpc_task my_task, *task = &my_task;
        sigset_t        oldset;
        int             async, status;

        /* If this client is slain all further I/O fails */
        if (clnt->cl_dead)
                return -EIO;

        rpc_clnt_sigmask(clnt, &oldset);

        /* Create/initialize a new RPC task */
        if ((async = (flags & RPC_TASK_ASYNC)) != 0) {
                if (!func)
                        func = rpc_default_callback;
                status = -ENOMEM;
                if (!(task = rpc_new_task(clnt, func, flags)))
                        goto out;
                task->tk_calldata = data;
        } else {
                rpc_init_task(task, clnt, NULL, flags);
        }

        /* Bind the user cred, set up the call info struct and
         * execute the task */
        if (rpcauth_lookupcred(task) != NULL) {
                rpc_call_setup(task, proc, argp, resp, 0);
                rpc_execute(task);
        } else
                async = 0;

        status = 0;
        if (!async) {
                status = task->tk_status;
                rpc_release_task(task);
        }

out:
        rpc_clnt_sigunmask(clnt, &oldset);

        return status;
}
void
rpc_call_setup(struct rpc_task *task, u32 proc,
                void *argp, void *resp, int flags)
{
        task->tk_action = call_bind;
        task->tk_proc   = proc;
        task->tk_argp   = argp;
        task->tk_resp   = resp;
        task->tk_flags |= flags;

        /* Increment call count */
        rpcproc_count(task->tk_client, proc)++;
}
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
        if (task->tk_flags & RPC_TASK_KILLED) {
                rpc_release_task(task);
                return;
        }
        task->tk_action = call_bind;
        rpcproc_count(task->tk_client, task->tk_proc)++;
}
/*
 * 0.   Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        task->tk_action = call_reserve;
        if (!clnt->cl_port)
                rpc_getport(task, clnt);
}
/*
 * 1.   Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        dprintk("RPC: %4d call_reserve\n", task->tk_pid);
        if (!clnt->cl_port) {
                printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
                        clnt->cl_protname, clnt->cl_server,
                        clnt->cl_softrtry ? "giving up" : "retrying");
                if (!clnt->cl_softrtry) {
                        rpc_delay(task, 5*HZ);
                        return;
                }
                rpc_exit(task, -EIO);
                return;
        }
        if (!rpcauth_uptodatecred(task)) {
                task->tk_action = call_refresh;
                return;
        }
        task->tk_action  = call_reserveresult;
        task->tk_timeout = clnt->cl_timeout.to_resrvval;
        clnt->cl_stats->rpccnt++;
        xprt_reserve(task);
}
/*
 * 1b.  Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                task->tk_pid, task->tk_status);
        /*
         * After a call to xprt_reserve(), we must have either
         * a request slot or else an error status.
         */
        if ((task->tk_status >= 0 && !task->tk_rqstp) ||
            (task->tk_status < 0 && task->tk_rqstp))
                printk("call_reserveresult: status=%d, request=%p??\n",
                        task->tk_status, task->tk_rqstp);

        if (task->tk_status >= 0) {
                task->tk_action = call_allocate;
                return;
        } else if (task->tk_status == -EAGAIN) {
                task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
                task->tk_status = 0;
                xprt_reserve(task);
                return;
        } else if (task->tk_status == -ETIMEDOUT) {
                printk("RPC: task timed out\n");
                task->tk_action = call_timeout;
                return;
        } else {
                task->tk_action = NULL;
        }
        if (!task->tk_rqstp) {
                printk("RPC: task has no request, exit EIO\n");
                rpc_exit(task, -EIO);
        }
}
/*
 * 2.   Allocate the buffer. For details, see sched.c:rpc_malloc.
 *      (Note: buffer memory is freed in rpc_release_task).
 */
static void
call_allocate(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        unsigned int    bufsiz;

        dprintk("RPC: %4d call_allocate (status %d)\n",
                task->tk_pid, task->tk_status);
        task->tk_action = call_encode;

        /* FIXME: compute buffer requirements more exactly using
         * auth->au_wslack */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;

        if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
                return;
        printk("RPC: buffer allocation failed for task %p\n", task);

        if (!signalled()) {
                xprt_release(task);
                task->tk_action = call_reserve;
                rpc_delay(task, HZ);
                return;
        }

        rpc_exit(task, -ERESTARTSYS);
}
/*
 * 3.   Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned int    bufsiz;
        kxdrproc_t      encode;
        int             status;
        u32             *p;

        dprintk("RPC: %4d call_encode (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_transmit;

        /* Default buffer setup */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc)+RPC_SLACK_SPACE;
        req->rq_svec[0].iov_base = task->tk_buffer;
        req->rq_svec[0].iov_len  = bufsiz;
        req->rq_rvec[0].iov_base = task->tk_buffer;
        req->rq_rvec[0].iov_len  = bufsiz;
        req->rq_rlen             = bufsiz;

        if (task->tk_proc > clnt->cl_maxproc) {
                printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
                        clnt->cl_protname, clnt->cl_vers, task->tk_proc);
                rpc_exit(task, -EIO);
                return;
        }

        /* Encode header and provided arguments */
        encode = rpcproc_encode(clnt, task->tk_proc);
        if (!(p = call_header(task))) {
                printk("RPC: call_header failed, exit EIO\n");
                rpc_exit(task, -EIO);
        } else if ((status = encode(req, p, task->tk_argp)) < 0) {
                printk(KERN_WARNING "%s: can't encode arguments: %d\n",
                        clnt->cl_protname, -status);
                rpc_exit(task, status);
        }
}
/*
 * 4.   Transmit the RPC request
 */
static void
call_transmit(struct rpc_task *task)
{
        dprintk("RPC: %4d call_transmit (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_receive;
        xprt_transmit(task);
}
/*
 * 5.   Wait for the RPC reply
 */
static void
call_receive(struct rpc_task *task)
{
        dprintk("RPC: %4d call_receive (status %d)\n",
                task->tk_pid, task->tk_status);

        /* In case of error, evaluate status */
        if (task->tk_status < 0) {
                task->tk_action = call_status;
                return;
        }

        /* If we have no decode function, this means we're performing
         * a void call (a la lockd message passing). */
        if (!rpcproc_decode(task->tk_client, task->tk_proc)) {
                rpc_remove_wait_queue(task);    /* remove from xprt_pending */
                task->tk_action = NULL;
                return;
        }

        task->tk_action = call_status;
        xprt_receive(task);
}
/*
 * 6.   Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req;
        int             status = task->tk_status;

        dprintk("RPC: %4d call_status (status %d)\n",
                task->tk_pid, task->tk_status);

        if (status >= 0) {
                task->tk_action = call_decode;
        } else if (status == -ETIMEDOUT) {
                task->tk_action = call_timeout;
        } else if (status == -EAGAIN) {
                if (!(req = task->tk_rqstp))
                        task->tk_action = call_reserve;
                else if (!task->tk_buffer)
                        task->tk_action = call_allocate;
                else if (req->rq_damaged)
                        task->tk_action = call_encode;
                else
                        task->tk_action = call_transmit;
        } else if (status == -ENOTCONN) {
                task->tk_action = call_reconnect;
        } else if (status == -ECONNREFUSED && clnt->cl_autobind) {
                task->tk_action = call_bind;
        } else {
                printk("%s: RPC call returned error %d\n",
                        clnt->cl_protname, -status);
                task->tk_action = NULL;
        }
}
/*
 * 6a.  Handle RPC timeout
 *      We do not release the request slot, so we keep using the
 *      same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req) {
                struct rpc_timeout *to = &req->rq_timeout;

                if (xprt_adjust_timeout(to)) {
                        dprintk("RPC: %4d call_timeout (minor timeo)\n",
                                task->tk_pid);
                        goto minor_timeout;
                }
                if ((to->to_initval <<= 1) > to->to_maxval)
                        to->to_initval = to->to_maxval;
        }

        dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
        if (clnt->cl_softrtry) {
                if (clnt->cl_chatty && !task->tk_exit)
                        printk("%s: server %s not responding, timed out\n",
                                clnt->cl_protname, clnt->cl_server);
                rpc_exit(task, -EIO);
                return;
        }
        if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
                printk("%s: server %s not responding, still trying\n",
                        clnt->cl_protname, clnt->cl_server);
                task->tk_flags |= RPC_CALL_MAJORSEEN;
        }
        if (clnt->cl_autobind)
                clnt->cl_port = 0;

minor_timeout:
        if (!clnt->cl_port) {
                task->tk_action = call_bind;
        } else if (!req) {
                task->tk_action = call_reserve;
        } else if (req->rq_damaged) {
                task->tk_action = call_encode;
                clnt->cl_stats->rpcretrans++;
        } else {
                task->tk_action = call_transmit;
                clnt->cl_stats->rpcretrans++;
        }
}
/*
 * 6b.  Reconnect to the RPC server (TCP case)
 */
static void
call_reconnect(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reconnect status %d\n",
                task->tk_pid, task->tk_status);
        if (task->tk_status == 0) {
                task->tk_action = call_status;
                task->tk_status = -EAGAIN;
                return;
        }
        task->tk_client->cl_stats->netreconn++;
        xprt_reconnect(task);
}
/*
 * 7.   Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        kxdrproc_t      decode = rpcproc_decode(clnt, task->tk_proc);
        u32             *p;

        dprintk("RPC: %4d call_decode (status %d)\n",
                task->tk_pid, task->tk_status);

        if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
                printk("%s: server %s OK\n",
                        clnt->cl_protname, clnt->cl_server);
                task->tk_flags &= ~RPC_CALL_MAJORSEEN;
        }

        if (task->tk_status < 12) {
                printk("%s: too small RPC reply size (%d bytes)\n",
                        clnt->cl_protname, task->tk_status);
                rpc_exit(task, -EIO);
                return;
        }

        /* Verify the RPC header */
        if (!(p = call_verify(task)))
                return;

        /*
         * The following is an NFS-specific hack to cater for setuid
         * processes whose uid is mapped to nobody on the server.
         */
        if (task->tk_client->cl_prog == 100003 &&
            (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
                if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
                        dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
                        task->tk_flags ^= RPC_CALL_REALUID;
                        task->tk_action = call_encode;
                        return;
                }
        }

        task->tk_action = NULL;
        task->tk_status = decode(req, p, task->tk_resp);
        dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
                task->tk_status);
}
/*
 * 8.   Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refresh\n", task->tk_pid);

        xprt_release(task);     /* Must do to obtain new XID */
        task->tk_action = call_refreshresult;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
}
/*
 * 8a.  Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refreshresult (status %d)\n",
                task->tk_pid, task->tk_status);

        if (task->tk_status < 0) {
                task->tk_status = -EACCES;
                task->tk_action = NULL;
        } else
                task->tk_action = call_reserve;
}
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_xprt *xprt = clnt->cl_xprt;
        u32             *p = task->tk_buffer;

        /* FIXME: check buffer size? */
        if (xprt->stream)
                *p++ = 0;                       /* fill in later */
        *p++ = task->tk_rqstp->rq_xid;          /* XID */
        *p++ = htonl(RPC_CALL);                 /* CALL */
        *p++ = htonl(RPC_VERSION);              /* RPC version */
        *p++ = htonl(clnt->cl_prog);            /* program number */
        *p++ = htonl(clnt->cl_vers);            /* program version */
        *p++ = htonl(task->tk_proc);            /* procedure */
        return rpcauth_marshcred(task, p);
}
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
        u32     *p = task->tk_buffer, n;
        int     error = -EACCES;

        p += 1; /* skip XID */

        if ((n = ntohl(*p++)) != RPC_REPLY) {
                printk("call_verify: not an RPC reply: %x\n", n);
                goto garbage;
        }
        if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
                if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
                        printk("call_verify: RPC call rejected: %x\n", n);
                } else
                switch ((n = ntohl(*p++))) {
                case RPC_AUTH_REJECTEDCRED:
                case RPC_AUTH_REJECTEDVERF:
                        if (!task->tk_cred_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry stale creds\n",
                                task->tk_pid);
                        rpcauth_invalcred(task);
                        task->tk_action = call_refresh;
                        return NULL;
                case RPC_AUTH_BADCRED:
                case RPC_AUTH_BADVERF:
                        /* possibly garbled cred/verf? */
                        if (!task->tk_garb_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry garbled creds\n",
                                task->tk_pid);
                        task->tk_action = call_encode;
                        return NULL;
                case RPC_AUTH_TOOWEAK:
                        printk("call_verify: server requires stronger "
                               "authentication.\n");
                        break;
                default:
                        printk("call_verify: unknown auth error: %x\n", n);
                }
                dprintk("RPC: %4d call_verify: call rejected %d\n",
                        task->tk_pid, n);
                rpc_exit(task, error);
                return NULL;
        }
        if (!(p = rpcauth_checkverf(task, p))) {
                printk("call_verify: auth check failed\n");
                goto garbage;           /* bad verifier, retry */
        }
        switch ((n = ntohl(*p++))) {
        case RPC_SUCCESS:
                return p;
        case RPC_GARBAGE_ARGS:
                break;
        default:
                printk("call_verify: server accept status: %x\n", n);
        }

garbage:
        dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
        task->tk_client->cl_stats->rpcgarbage++;
        if (task->tk_garb_retry--) {
                printk("RPC: garbage, retrying %4d\n", task->tk_pid);
                task->tk_action = call_encode;
                return NULL;
        }
        printk("RPC: garbage, exit EIO\n");
        rpc_exit(task, -EIO);
        return NULL;
}