/*
 *  linux/net/sunrpc/rpcclnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP reconnect handling (when finished).
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>

#include <linux/nfs.h>

#define RPC_SLACK_SPACE		512	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_reconnect(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
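
/*
 * Rough ordering of the call_* state machine, summarized from the
 * numbered comments on the handlers below:
 *
 *	call_reserve -> call_reserveresult -> call_allocate -> call_encode
 *	  -> call_bind [-> call_reconnect] -> call_transmit -> call_status
 *	  -> call_decode
 *
 * call_refresh/call_refreshresult run when credentials must be
 * (re)established, and call_timeout when a request times out.
 */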

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers, int flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

#ifdef RPC_DEBUG
	rpc_register_sysctl();
#endif
	xdr_init();

	if (!xprt)
		goto out;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out;

	clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
	if (!clnt)
		goto out_no_clnt;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_server   = servname;
	clnt->cl_protname = program->name;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	if (!rpcauth_create(flavor, clnt))
		goto out_no_auth;

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
	return clnt;

out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
	goto out;
out_no_auth:
	printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
		flavor);
	rpc_free(clnt);
	clnt = NULL;
	goto out;
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);
	while (atomic_read(&clnt->cl_users)) {
#ifdef RPC_DEBUG
		dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
			clnt->cl_protname, atomic_read(&clnt->cl_users));
#endif
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}
	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	rpc_free(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
		clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sig->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	irqflags;

	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
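
/*
 * Usage note: callers that sleep on an RPC bracket the wait with these two
 * helpers, as rpc_call_sync() and rpc_call_async() below do -- mask every
 * signal except SIGKILL (plus SIGINT/SIGQUIT when cl_intr is set and their
 * handlers are SIG_DFL), run the call, then restore the saved mask.
 */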

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	my_task, *task = &my_task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	if (flags & RPC_TASK_ASYNC) {
		printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
		flags &= ~RPC_TASK_ASYNC;
	}

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	rpc_init_task(task, clnt, NULL, flags);
	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}
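
/*
 * Illustrative use only (not part of this file): a filesystem client would
 * typically fill in a struct rpc_message and make a synchronous call like
 * this; the procedure number, argument and result types are hypothetical.
 *
 *	struct rpc_message msg;
 *
 *	msg.rpc_proc = MYPROC_GETATTR;	// some procedure of the program
 *	msg.rpc_argp = &args;		// XDR-encodable argument struct
 *	msg.rpc_resp = &result;		// XDR-decodable result struct
 *	msg.rpc_cred = NULL;		// let rpc_call_setup bind a cred
 *	status = rpc_call_sync(clnt, &msg, 0);
 */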

/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}
out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL) {
		rpcauth_holdcred(task);
	} else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_reserve;
	else
		task->tk_action = NULL;

	/* Increment call count */
	if (task->tk_msg.rpc_proc < task->tk_client->cl_maxproc)
		rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_reserve;
	rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (task->tk_msg.rpc_proc > clnt->cl_maxproc) {
		printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
			clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc);
		rpc_exit(task, -EIO);
		return;
	}

	dprintk("RPC: %4d call_reserve\n", task->tk_pid);
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	task->tk_timeout = clnt->cl_timeout.to_resrvval;
	clnt->cl_stats->rpccnt++;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
		task->tk_pid, task->tk_status);
	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	if ((task->tk_status >= 0 && !task->tk_rqstp) ||
	    (task->tk_status < 0 && task->tk_rqstp))
		printk(KERN_ERR "call_reserveresult: status=%d, request=%p??\n",
			task->tk_status, task->tk_rqstp);

	if (task->tk_status >= 0) {
		task->tk_action = call_allocate;
		return;
	}

	task->tk_status = 0;
	switch (status) {
	case -EAGAIN:
	case -ENOBUFS:
		task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
		task->tk_action = call_reserve;
		break;
	case -ETIMEDOUT:
		dprintk("RPC: task timed out\n");
		task->tk_action = call_timeout;
		break;
	default:
		if (!task->tk_rqstp) {
			printk(KERN_INFO "RPC: task has no request, exit EIO\n");
			rpc_exit(task, -EIO);
		} else
			rpc_exit(task, status);
	}
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
		task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

	if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_bind;

	/* Default buffer setup */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
	req->rq_svec[0].iov_base = (void *)task->tk_buffer;
	req->rq_svec[0].iov_len  = bufsiz;
	req->rq_slen		 = 0;
	req->rq_snr		 = 1;
	req->rq_rvec[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	req->rq_rvec[0].iov_len  = bufsiz;
	req->rq_rlen		 = bufsiz;
	req->rq_rnr		 = 1;

	/* Zero buffer so we have automatic zero-padding of opaque & string */
	memset(task->tk_buffer, 0, bufsiz);

	/* Encode header and provided arguments */
	encode = rpcproc_encode(clnt, task->tk_msg.rpc_proc);
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
	} else
	if (encode && (status = encode(req, p, task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
			clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
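
/*
 * Buffer layout used above: call_allocate() obtained a single buffer of
 * 2 * bufsiz bytes; the first half backs the send iovec (rq_svec[0]) and
 * the second half backs the receive iovec (rq_rvec[0]).
 */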

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;

	task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_reconnect;

	if (!clnt->cl_port) {
		task->tk_action = call_reconnect;
		task->tk_timeout = clnt->cl_timeout.to_maxval;
		rpc_getport(task, clnt);
	}
}
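
/*
 * Note: a zero cl_port means the server port is unknown, so call_bind asks
 * the portmapper via rpc_getport() before connecting; call_status and
 * call_timeout reset cl_port to 0 when cl_autobind is set to force a rebind.
 */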

/*
 * 4a.	Reconnect to the RPC server (TCP case)
 */
static void
call_reconnect(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_reconnect status %d\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_transmit;
	if (task->tk_status < 0 || !clnt->cl_xprt->stream)
		return;
	clnt->cl_stats->netreconn++;
	xprt_reconnect(task);
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_transmit (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc)) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;
	struct rpc_rqst	*req;
	int		status = task->tk_status;

	dprintk("RPC: %4d call_status (status %d)\n",
		task->tk_pid, task->tk_status);

	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	req = task->tk_rqstp;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind || !clnt->cl_port) {
			clnt->cl_port = 0;
			task->tk_action = call_bind;
			break;
		}
		if (xprt->stream) {
			task->tk_action = call_reconnect;
			break;
		}
		/*
		 * Sleep and dream of an open connection
		 */
		task->tk_timeout = 5 * HZ;
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	case -ENOMEM:
	case -EAGAIN:
		task->tk_action = call_transmit;
		clnt->cl_stats->rpcretrans++;
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req) {
		struct rpc_timeout *to = &req->rq_timeout;

		if (xprt_adjust_timeout(to)) {
			dprintk("RPC: %4d call_timeout (minor timeo)\n",
				task->tk_pid);
			goto minor_timeout;
		}
		to->to_retries = clnt->cl_timeout.to_retries;
	}

	dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
	if (clnt->cl_softrtry) {
		if (clnt->cl_chatty && !task->tk_exit)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}
	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (req)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
#ifdef RPC_DEBUG
		else
			printk(KERN_NOTICE "%s: task %d can't get a request slot\n",
				clnt->cl_protname, task->tk_pid);
#endif
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

minor_timeout:
	if (!req)
		task->tk_action = call_reserve;
	else if (!clnt->cl_port) {
		task->tk_action = call_bind;
		clnt->cl_stats->rpcretrans++;
	} else if (!xprt_connected(clnt->cl_xprt)) {
		task->tk_action = call_reconnect;
		clnt->cl_stats->rpcretrans++;
	} else {
		task->tk_action = call_transmit;
		clnt->cl_stats->rpcretrans++;
	}
	task->tk_status = 0;
}
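
/*
 * In the handler above, a nonzero return from xprt_adjust_timeout() is a
 * "minor" timeout: the request slot (and hence the XID) is kept and the
 * call is simply re-driven through bind/reconnect/transmit. Otherwise the
 * retry count is reset and the "server not responding" path runs first.
 */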

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = rpcproc_decode(clnt, task->tk_msg.rpc_proc);
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
		task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	/* Verify the RPC header */
	if (!(p = call_verify(task)))
		return;

	/*
	 * The following is an NFS-specific hack to cater for setuid
	 * processes whose uid is mapped to nobody on the server.
	 */
	if (task->tk_client->cl_droppriv &&
	    (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
		if (RPC_IS_SETUID(task) && task->tk_suid_retry) {
			dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
			task->tk_flags ^= RPC_CALL_REALUID;
			task->tk_action = call_encode;
			task->tk_suid_retry--;
			return;
		}
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
		task->tk_status);
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
		task->tk_pid, task->tk_status);

	if (task->tk_status < 0)
		rpc_exit(task, -EACCES);
	else
		task->tk_action = call_reserve;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;				/* fill in later */
	*p++ = req->rq_xid;				/* XID */
	*p++ = htonl(RPC_CALL);				/* CALL */
	*p++ = htonl(RPC_VERSION);			/* RPC version */
	*p++ = htonl(clnt->cl_prog);			/* program number */
	*p++ = htonl(clnt->cl_vers);			/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc);		/* procedure */
	return rpcauth_marshcred(task, p);
}
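
/*
 * Resulting call header layout (one 32-bit word each, in the order written
 * above): an extra placeholder word on stream transports (filled in later),
 * then XID, message type (CALL), RPC version, program number, program
 * version, procedure, followed by the credential and verifier marshalled
 * by rpcauth_marshcred().
 */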

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	u32	*p = task->tk_rqstp->rq_rvec[0].iov_base, n;

	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		int	error = -EACCES;

		if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
			printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n);
		} else
			switch ((n = ntohl(*p++))) {
			case RPC_AUTH_REJECTEDCRED:
			case RPC_AUTH_REJECTEDVERF:
				if (!task->tk_cred_retry)
					break;
				task->tk_cred_retry--;
				dprintk("RPC: %4d call_verify: retry stale creds\n",
					task->tk_pid);
				rpcauth_invalcred(task);
				task->tk_action = call_refresh;
				return NULL;
			case RPC_AUTH_BADCRED:
			case RPC_AUTH_BADVERF:
				/* possibly garbled cred/verf? */
				if (!task->tk_garb_retry)
					break;
				task->tk_garb_retry--;
				dprintk("RPC: %4d call_verify: retry garbled creds\n",
					task->tk_pid);
				task->tk_action = call_encode;
				return NULL;
			case RPC_AUTH_TOOWEAK:
				printk(KERN_NOTICE "call_verify: server requires stronger "
				       "authentication.\n");
				break;
			default:
				printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
				error = -EIO;
			}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
			task->tk_pid, n);
		rpc_exit(task, error);
		return NULL;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto garbage;		/* bad verifier, retry */
	}
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_GARBAGE_ARGS:
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

garbage:
	dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk(KERN_WARNING "RPC: garbage, retrying %4d\n", task->tk_pid);
		task->tk_action = call_encode;
		return NULL;
	}
	printk(KERN_WARNING "RPC: garbage, exit EIO\n");
	rpc_exit(task, -EIO);
	return NULL;
}
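
/*
 * For reference, the accepted-reply layout that call_verify() walks above:
 * XID, message type (REPLY), reply status (MSG_ACCEPTED or denied), then --
 * for accepted replies -- the verifier checked by rpcauth_checkverf() and
 * the accept status (SUCCESS, GARBAGE_ARGS, ...) preceding the
 * procedure-specific results handed back to call_decode().
 */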