/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP reconnect handling (when finished).
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <asm/system.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>

#include <linux/nfs.h>

#define RPC_SLACK_SPACE		512	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_reconnect(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
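
/*
 * Overview of the call state machine below (the normal path, as wired
 * up by the tk_action assignments in this file):
 *
 *	call_reserve -> call_reserveresult -> call_allocate -> call_encode
 *	  -> call_bind [-> call_reconnect] -> call_transmit -> call_status
 *	  -> call_decode
 *
 * Retry and error paths re-enter the machine at call_refresh (stale
 * credentials), call_timeout (retransmission), or call_encode (damaged
 * or garbled request).
 */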
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers, int flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

#ifdef RPC_DEBUG
	rpc_register_sysctl();
#endif
	xdr_init();

	if (!xprt)
		goto out;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out;

	clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
	if (!clnt)
		goto out_no_clnt;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_server   = servname;
	clnt->cl_protname = program->name;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	if (!rpcauth_create(flavor, clnt))
		goto out_no_auth;

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
	return clnt;

out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
	goto out;
out_no_auth:
	printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
		flavor);
	rpc_free(clnt);
	clnt = NULL;
	goto out;
}
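
/*
 * A minimal usage sketch (illustrative only, modeled on in-tree callers
 * such as the NFS mount code; the server address, timeout parameters,
 * program struct and version number below are hypothetical placeholders):
 *
 *	struct rpc_xprt	*xprt;
 *	struct rpc_clnt	*clnt = NULL;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &srvaddr, &timeparms);
 *	if (xprt)
 *		clnt = rpc_create_client(xprt, "fileserver",
 *					 &my_program, 2, RPC_AUTH_UNIX);
 */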
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);
	while (atomic_read(&clnt->cl_users)) {
#ifdef RPC_DEBUG
		dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
			clnt->cl_protname, atomic_read(&clnt->cl_users));
#endif
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}
	return rpc_destroy_client(clnt);
}
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	rpc_free(clnt);
	return 0;
}
/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
		clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}
/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sig->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	irqflags;

	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
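
/*
 * The two helpers above are intended to bracket code that sleeps on an
 * RPC call, exactly as rpc_call_sync() below uses them. A minimal
 * sketch of the pattern:
 *
 *	sigset_t oldset;
 *
 *	rpc_clnt_sigmask(clnt, &oldset);   // block all but SIGKILL (and
 *					   // SIGINT/SIGQUIT when defaulted)
 *	status = rpc_execute(task);	   // may sleep
 *	rpc_clnt_sigunmask(clnt, &oldset); // restore the caller's mask
 */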
/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	my_task, *task = &my_task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	if (flags & RPC_TASK_ASYNC) {
		printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
		flags &= ~RPC_TASK_ASYNC;
	}

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	rpc_init_task(task, clnt, NULL, flags);
	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}
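
/*
 * A sketch of typical rpc_call_sync() usage, modeled on in-tree callers.
 * The procedure number and the argument/result structures are
 * hypothetical placeholders, and the rpc_message field order is assumed
 * to be rpc_proc, rpc_argp, rpc_resp, rpc_cred:
 *
 *	struct rpc_message msg = { MYPROC_GETATTR, &args, &res, NULL };
 *	int status;
 *
 *	status = rpc_call_sync(clnt, &msg, 0);
 *	if (status < 0)
 *		handle the transport- or RPC-level failure;
 */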
/*
 * New rpc_call implementation (asynchronous variant)
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}
out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}
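
/*
 * Asynchronous usage differs in that the callback runs from rpciod when
 * the task completes, and the result must be picked out of the task
 * itself. A sketch with a hypothetical callback and result type:
 *
 *	static void my_done(struct rpc_task *task)
 *	{
 *		struct my_result *res = (struct my_result *) task->tk_calldata;
 *
 *		if (task->tk_status < 0)
 *			handle error;
 *	}
 *
 *	status = rpc_call_async(clnt, &msg, 0, my_done, res);
 */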
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg    = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL) {
		rpcauth_holdcred(task);
	} else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_reserve;
	else
		task->tk_action = NULL;

	/* Increment call count */
	if (task->tk_msg.rpc_proc < task->tk_client->cl_maxproc)
		rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_reserve;
	rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	/* Valid procedure numbers are 0 .. cl_maxproc - 1, matching the
	 * check in rpc_call_setup() above. */
	if (task->tk_msg.rpc_proc >= clnt->cl_maxproc) {
		printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
			clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc);
		rpc_exit(task, -EIO);
		return;
	}

	dprintk("RPC: %4d call_reserve\n", task->tk_pid);
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	task->tk_timeout = clnt->cl_timeout.to_resrvval;
	clnt->cl_stats->rpccnt++;
	xprt_reserve(task);
}
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
		task->tk_pid, task->tk_status);
	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	if ((task->tk_status >= 0 && !task->tk_rqstp) ||
	    (task->tk_status < 0 && task->tk_rqstp))
		printk(KERN_ERR "call_reserveresult: status=%d, request=%p??\n",
			task->tk_status, task->tk_rqstp);

	if (task->tk_status >= 0) {
		task->tk_action = call_allocate;
		return;
	}

	task->tk_status = 0;
	switch (status) {
	case -EAGAIN:
	case -ENOBUFS:
		task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
		task->tk_action  = call_reserve;
		break;
	case -ETIMEDOUT:
		dprintk("RPC: task timed out\n");
		task->tk_action = call_timeout;
		break;
	default:
		if (!task->tk_rqstp) {
			printk(KERN_INFO "RPC: task has no request, exit EIO\n");
			rpc_exit(task, -EIO);
		} else
			rpc_exit(task, status);
	}
}
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_release_task).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
		task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

	if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_bind;

	/* Default buffer setup: the send vector uses the first half of
	 * the (bufsiz << 1)-sized buffer, the receive vector the second. */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
	req->rq_svec[0].iov_base = (void *)task->tk_buffer;
	req->rq_svec[0].iov_len  = bufsiz;
	req->rq_slen		 = 0;
	req->rq_snr		 = 1;
	req->rq_rvec[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	req->rq_rvec[0].iov_len  = bufsiz;
	req->rq_rlen		 = bufsiz;
	req->rq_rnr		 = 1;
	req->rq_damaged		 = 0;

	/* Zero buffer so we have automatic zero-padding of opaque & string */
	memset(task->tk_buffer, 0, bufsiz);

	/* Encode header and provided arguments */
	encode = rpcproc_encode(clnt, task->tk_msg.rpc_proc);
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
	} else
	if (encode && (status = encode(req, p, task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
			clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;

	task->tk_action = (xprt->connected) ? call_transmit : call_reconnect;

	if (!clnt->cl_port) {
		task->tk_action = call_reconnect;
		task->tk_timeout = clnt->cl_timeout.to_maxval;
		rpc_getport(task, clnt);
	}
}
/*
 * 4a.	Reconnect to the RPC server (TCP case)
 */
static void
call_reconnect(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_reconnect status %d\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_transmit;
	if (task->tk_status < 0 || !clnt->cl_xprt->stream)
		return;
	clnt->cl_stats->netreconn++;
	xprt_reconnect(task);
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_transmit (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc)) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;
	struct rpc_rqst	*req;
	int		status = task->tk_status;

	dprintk("RPC: %4d call_status (status %d)\n",
		task->tk_pid, task->tk_status);

	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	req = task->tk_rqstp;
	switch (status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind || !clnt->cl_port) {
			clnt->cl_port = 0;
			task->tk_action = call_bind;
			break;
		}
		if (xprt->stream) {
			task->tk_action = call_reconnect;
			break;
		}
		/*
		 * Sleep and dream of an open connection
		 */
		task->tk_timeout = 5 * HZ;
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
		/* fall through: set up the retransmit for when we wake */
	case -ENOMEM:
	case -EAGAIN:
		if (req->rq_damaged)
			task->tk_action = call_encode;
		else
			task->tk_action = call_transmit;
		clnt->cl_stats->rpcretrans++;
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req) {
		struct rpc_timeout *to = &req->rq_timeout;

		if (xprt_adjust_timeout(to)) {
			dprintk("RPC: %4d call_timeout (minor timeo)\n",
				task->tk_pid);
			goto minor_timeout;
		}
		to->to_retries = clnt->cl_timeout.to_retries;
	}

	dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
	if (clnt->cl_softrtry) {
		if (clnt->cl_chatty && !task->tk_exit)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (req)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
#ifdef RPC_DEBUG
		else
			printk(KERN_NOTICE "%s: task %d can't get a request slot\n",
				clnt->cl_protname, task->tk_pid);
#endif
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

minor_timeout:
	if (!req)
		task->tk_action = call_reserve;
	else if (req->rq_damaged) {
		task->tk_action = call_encode;
		clnt->cl_stats->rpcretrans++;
	} else if (!clnt->cl_port) {
		task->tk_action = call_bind;
		clnt->cl_stats->rpcretrans++;
	} else if (clnt->cl_xprt->stream && !clnt->cl_xprt->connected) {
		task->tk_action = call_reconnect;
		clnt->cl_stats->rpcretrans++;
	} else {
		task->tk_action = call_transmit;
		clnt->cl_stats->rpcretrans++;
	}
	task->tk_status = 0;
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = rpcproc_decode(clnt, task->tk_msg.rpc_proc);
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
		task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	/* Verify the RPC header */
	if (!(p = call_verify(task)))
		return;

	/*
	 * The following is an NFS-specific hack to cater for setuid
	 * processes whose uid is mapped to nobody on the server.
	 */
	if (task->tk_client->cl_prog == NFS_PROGRAM &&
	    (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
		if (RPC_IS_SETUID(task) && task->tk_suid_retry) {
			dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
			task->tk_flags ^= RPC_CALL_REALUID;
			task->tk_action = call_encode;
			task->tk_suid_retry--;
			return;
		}
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
		task->tk_status);
}
/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
		task->tk_pid, task->tk_status);

	if (task->tk_status < 0)
		rpc_exit(task, -EACCES);
	else
		task->tk_action = call_reserve;
}
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;				/* fill in later */
	*p++ = req->rq_xid;				/* XID */
	*p++ = htonl(RPC_CALL);				/* CALL */
	*p++ = htonl(RPC_VERSION);			/* RPC version */
	*p++ = htonl(clnt->cl_prog);			/* program number */
	*p++ = htonl(clnt->cl_vers);			/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc);		/* procedure */
	return rpcauth_marshcred(task, p);
}
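
/*
 * On the wire, the header marshalled above looks like this (each field
 * a 32-bit XDR word, per RFC 1057):
 *
 *	[record marker]	stream (TCP) transports only; length filled in
 *			later by the transport
 *	xid		transaction ID, from the request slot
 *	msg type	RPC_CALL
 *	rpcvers		RPC_VERSION (2)
 *	prog		program number
 *	vers		program version
 *	proc		procedure number
 *	cred + verf	appended by rpcauth_marshcred()
 */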
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	u32	*p = task->tk_rqstp->rq_rvec[0].iov_base, n;

	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		int	error = -EACCES;

		if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
			printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n);
		} else
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
				task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
				task->tk_pid);
			task->tk_action = call_encode;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
			task->tk_pid, n);
		rpc_exit(task, error);
		return NULL;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto garbage;		/* bad verifier, retry */
	}
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_GARBAGE_ARGS:
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

garbage:
	dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk(KERN_WARNING "RPC: garbage, retrying %4d\n", task->tk_pid);
		task->tk_action = call_encode;
		return NULL;
	}
	printk(KERN_WARNING "RPC: garbage, exit EIO\n");
	rpc_exit(task, -EIO);
	return NULL;
}
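
/*
 * For reference, the reply header that call_verify() walks (RFC 1057):
 *
 *	xid
 *	msg type	RPC_REPLY
 *	reply stat	RPC_MSG_ACCEPTED or RPC_MSG_DENIED
 *	  denied:	reject stat, then auth stat (handled in the
 *			switch above)
 *	  accepted:	verf (checked by rpcauth_checkverf()), then the
 *			accept stat (RPC_SUCCESS, RPC_GARBAGE_ARGS, ...),
 *			followed by the procedure results
 */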