/*
 *  linux/net/sunrpc/rpcclnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP reconnect handling (when finished).
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>
#include <asm/segment.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>

#define RPC_SLACK_SPACE		1024	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_bind(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_receive(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_reconnect(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
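
/*
 * A successful call walks the handlers declared above in the order
 * call_bind (0), call_reserve/call_reserveresult (1/1b),
 * call_allocate (2), call_encode (3), call_transmit (4),
 * call_receive (5), call_status (6) and call_decode (7);
 * call_timeout (6a), call_reconnect (6b) and the credential
 * refresh handlers (8/8a) are entered from the retry paths.
 */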

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers, int flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	if (!xprt)
		goto out;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out;

	clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
	if (!clnt)
		goto out_no_clnt;
	memset(clnt, 0, sizeof(*clnt));

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_server   = servname;
	clnt->cl_protname = program->name;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = IPPROTO_UDP;
	clnt->cl_stats    = program->stats;
	clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	if (!rpcauth_create(flavor, clnt))
		goto out_no_auth;

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
	return clnt;

out_no_clnt:
	printk("RPC: out of memory in rpc_create_client\n");
	goto out;
out_no_auth:
	printk("RPC: Couldn't create auth handle (flavor %d)\n",
		flavor);
	rpc_free(clnt);
	clnt = NULL;
	goto out;
}
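
/*
 * Usage sketch (illustrative, not part of this file): a client such
 * as NFS creates a transport first and the rpc_clnt on top of it.
 * The xprt_create_proto() call and the parameter names here are
 * assumptions based on the contemporary sunrpc interfaces:
 *
 *	struct rpc_xprt	*xprt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeparms);
 *	if (xprt != NULL)
 *		clnt = rpc_create_client(xprt, hostname, &nfs_program,
 *					 NFS_VERSION, RPC_AUTH_UNIX);
 */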

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);
	while (clnt->cl_users) {
#ifdef RPC_DEBUG
		printk("rpc_shutdown_client: client %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_users);
#endif
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on(&destroy_wait);
	}
	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	rpc_free(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
		clnt, clnt->cl_users);
	if (clnt->cl_users) {
		if (--(clnt->cl_users) > 0)
			return;
	} else
		printk("rpc_release_client: %s client already free??\n",
			clnt->cl_protname);

	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
	rpc_release_task(task);
}

/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sig->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
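
	/* Block everything except the signals in sigallow that the
	 * caller had not already blocked; the old mask is saved in
	 * *oldset for rpc_clnt_sigunmask() to restore. */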
	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	irqflags;

	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
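
/*
 * These two helpers are used as a bracket around code that sleeps on
 * an RPC call, as rpc_do_call() below does:
 *
 *	sigset_t	oldset;
 *
 *	rpc_clnt_sigmask(clnt, &oldset);
 *	...sleep on the RPC call...
 *	rpc_clnt_sigunmask(clnt, &oldset);
 */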

/*
 * New rpc_call implementation
 */
int
rpc_do_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp,
		int flags, rpc_action func, void *data)
{
	struct rpc_task	my_task, *task = &my_task;
	sigset_t	oldset;
	int		async, status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if ((async = (flags & RPC_TASK_ASYNC)) != 0) {
		if (!func)
			func = rpc_default_callback;
		status = -ENOMEM;
		if (!(task = rpc_new_task(clnt, func, flags)))
			goto out;
		task->tk_calldata = data;
	} else {
		rpc_init_task(task, clnt, NULL, flags);
	}

	/* Bind the user cred, set up the call info struct and
	 * execute the task */
	if (rpcauth_lookupcred(task) != NULL) {
		rpc_call_setup(task, proc, argp, resp, 0);
		rpc_execute(task);
	} else
		async = 0;

	status = 0;
	if (!async) {
		status = task->tk_status;
		rpc_release_task(task);
	}

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}
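
/*
 * Invocation sketch (caller names are illustrative): a synchronous
 * caller passes no callback and gets the RPC status back directly;
 * an asynchronous caller sets RPC_TASK_ASYNC and supplies a callback
 * that is responsible for calling rpc_release_task():
 *
 *	status = rpc_do_call(clnt, proc, &args, &res, 0, NULL, NULL);
 *
 *	status = rpc_do_call(clnt, proc, &args, &res, RPC_TASK_ASYNC,
 *			     my_callback, my_data);
 */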

void
rpc_call_setup(struct rpc_task *task, u32 proc,
		void *argp, void *resp, int flags)
{
	task->tk_action = call_bind;
	task->tk_proc   = proc;
	task->tk_argp   = argp;
	task->tk_resp   = resp;
	task->tk_flags |= flags;

	/* Increment call count */
	rpcproc_count(task->tk_client, proc)++;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (task->tk_flags & RPC_TASK_KILLED) {
		rpc_release_task(task);
		return;
	}
	task->tk_action = call_bind;
	rpcproc_count(task->tk_client, task->tk_proc)++;
}

/*
 * 0.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	task->tk_action = call_reserve;
	task->tk_status = 0;
	if (!clnt->cl_port)
		rpc_getport(task, clnt);
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_reserve\n", task->tk_pid);
	if (!clnt->cl_port) {
		printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
			clnt->cl_protname, clnt->cl_server,
			clnt->cl_softrtry? "giving up" : "retrying");
		if (!clnt->cl_softrtry) {
			rpc_delay(task, 5*HZ);
			return;
		}
		rpc_exit(task, -EIO);
		return;
	}
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}
	task->tk_action  = call_reserveresult;
	task->tk_timeout = clnt->cl_timeout.to_resrvval;
	task->tk_status  = 0;
	clnt->cl_stats->rpccnt++;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserveresult (status %d)\n",
		task->tk_pid, task->tk_status);
	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	if ((task->tk_status >= 0 && !task->tk_rqstp) ||
	    (task->tk_status < 0 && task->tk_rqstp))
		printk("call_reserveresult: status=%d, request=%p??\n",
			task->tk_status, task->tk_rqstp);

	if (task->tk_status >= 0) {
		task->tk_action = call_allocate;
		goto out;
	} else if (task->tk_status == -EAGAIN) {
		task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
		task->tk_status = 0;
		xprt_reserve(task);
		goto out;
	} else if (task->tk_status == -ETIMEDOUT) {
		dprintk("RPC: task timed out\n");
		task->tk_action = call_timeout;
		goto out;
	} else {
		task->tk_action = NULL;
	}
	if (!task->tk_rqstp) {
		printk("RPC: task has no request, exit EIO\n");
		rpc_exit(task, -EIO);
	}
out:
	return;
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
		task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;

	if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
		return;
	printk("RPC: buffer allocation failed for task %p\n", task);

	if (!signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	int		status;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_transmit;

	/* Default buffer setup */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_proc)+RPC_SLACK_SPACE;
	req->rq_svec[0].iov_base = task->tk_buffer;
	req->rq_svec[0].iov_len  = bufsiz;
	req->rq_slen		 = 0;
	req->rq_snr		 = 1;
	req->rq_rvec[0].iov_base = task->tk_buffer;
	req->rq_rvec[0].iov_len  = bufsiz;
	req->rq_rlen		 = bufsiz;
	req->rq_rnr		 = 1;
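
	/* The send and receive vectors above share tk_buffer: the
	 * reply is received into the same buffer that carries the
	 * call data. */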

	if (task->tk_proc > clnt->cl_maxproc) {
		printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
			clnt->cl_protname, clnt->cl_vers, task->tk_proc);
		rpc_exit(task, -EIO);
		return;
	}

	/* Encode header and provided arguments */
	encode = rpcproc_encode(clnt, task->tk_proc);
	if (!(p = call_header(task))) {
		printk("RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
	} else
	if ((status = encode(req, p, task->tk_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
			clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 4.	Transmit the RPC request
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_receive;
	task->tk_status = 0;
	xprt_transmit(task);
}

/*
 * 5.	Wait for the RPC reply
 */
static void
call_receive(struct rpc_task *task)
{
	dprintk("RPC: %4d call_receive (status %d)\n",
		task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	/* In case of error, evaluate status */
	if (task->tk_status < 0)
		return;

	/* If we have no decode function, this means we're performing
	 * a void call (a la lockd message passing). */
	if (!rpcproc_decode(task->tk_client, task->tk_proc)) {
		rpc_remove_wait_queue(task);	/* remove from xprt_pending */
		task->tk_action = NULL;
		return;
	}

	xprt_receive(task);
}

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req;
	int		status = task->tk_status;

	dprintk("RPC: %4d call_status (status %d)\n",
		task->tk_pid, task->tk_status);

	if (status >= 0) {
		task->tk_action = call_decode;
	} else if (status == -ETIMEDOUT) {
		task->tk_action = call_timeout;
	} else if (status == -EAGAIN) {
		if (!(req = task->tk_rqstp))
			task->tk_action = call_reserve;
		else if (!task->tk_buffer)
			task->tk_action = call_allocate;
		else if (req->rq_damaged)
			task->tk_action = call_encode;
		else
			task->tk_action = call_transmit;
	} else if (status == -ENOTCONN) {
		task->tk_action = call_reconnect;
	} else if (status == -ECONNREFUSED && clnt->cl_autobind) {
		task->tk_action = call_bind;
		clnt->cl_port = 0;
	} else {
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		task->tk_action = NULL;
		return;
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req) {
		struct rpc_timeout *to = &req->rq_timeout;

		if (xprt_adjust_timeout(to)) {
			dprintk("RPC: %4d call_timeout (minor timeo)\n",
				task->tk_pid);
			goto minor_timeout;
		}
		to->to_initval <<= 1;
		if (to->to_initval > to->to_maxval)
			to->to_initval = to->to_maxval;
	}

	dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
	if (clnt->cl_softrtry) {
		if (clnt->cl_chatty && !task->tk_exit)
			printk("%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}
	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (req)
			printk("%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
		else
			printk("%s: task %d can't get a request slot\n",
				clnt->cl_protname, task->tk_pid);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

minor_timeout:
	if (!clnt->cl_port) {
		task->tk_action = call_bind;
	} else if (!req) {
		task->tk_action = call_reserve;
	} else if (req->rq_damaged) {
		task->tk_action = call_encode;
		clnt->cl_stats->rpcretrans++;
	} else {
		task->tk_action = call_transmit;
		clnt->cl_stats->rpcretrans++;
	}
	task->tk_status = 0;
}

/*
 * 6b.	Reconnect to the RPC server (TCP case)
 */
static void
call_reconnect(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reconnect status %d\n",
		task->tk_pid, task->tk_status);
	if (task->tk_status == 0) {
		task->tk_action = call_status;
		task->tk_status = -EAGAIN;
		return;
	}
	task->tk_client->cl_stats->netreconn++;
	xprt_reconnect(task);
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = rpcproc_decode(clnt, task->tk_proc);
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
		task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk("%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}
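
	/* The smallest well-formed reply is 12 bytes: XID, message
	 * type and reply status take one 32-bit word each. */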
	if (task->tk_status < 12) {
		printk("%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	/* Verify the RPC header */
	if (!(p = call_verify(task)))
		return;

	/*
	 * The following is an NFS-specific hack to cater for setuid
	 * processes whose uid is mapped to nobody on the server.
	 */
	if (task->tk_client->cl_prog == 100003 &&
	    (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
		if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
			dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
			task->tk_flags ^= RPC_CALL_REALUID;
			task->tk_action = call_encode;
			return;
		}
	}

	task->tk_action = NULL;
	task->tk_status = decode(req, p, task->tk_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
		task->tk_status);
}

/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
		task->tk_pid, task->tk_status);

	if (task->tk_status < 0) {
		task->tk_status = -EACCES;
		task->tk_action = NULL;
	} else
		task->tk_action = call_reserve;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt	*xprt = clnt->cl_xprt;
	u32		*p = task->tk_buffer;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;			/* fill in later */
	*p++ = task->tk_rqstp->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);			/* CALL */
	*p++ = htonl(RPC_VERSION);		/* RPC version */
	*p++ = htonl(clnt->cl_prog);		/* program number */
	*p++ = htonl(clnt->cl_vers);		/* program version */
	*p++ = htonl(task->tk_proc);		/* procedure */
	return rpcauth_marshcred(task, p);
}
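
/*
 * The resulting call header matches the RPC message layout of
 * RFC 1057: six 32-bit words (XID, CALL, RPC version, program,
 * version, procedure), followed by the credential and verifier
 * that rpcauth_marshcred() appends.  On stream transports an
 * extra word is reserved up front for the TCP record marker.
 */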

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	u32	*p = task->tk_buffer, n;

	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk("call_verify: not an RPC reply: %x\n", n);
		goto garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		int	error = -EACCES;

		if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
			printk("call_verify: RPC call rejected: %x\n", n);
		} else
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
			if (!task->tk_cred_retry--)
				break;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
				task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry--)
				break;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
				task->tk_pid);
			task->tk_action = call_encode;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk("call_verify: server requires stronger "
				"authentication.\n");
			break;
		default:
			printk("call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
			task->tk_pid, n);
		rpc_exit(task, error);
		return NULL;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk("call_verify: auth check failed\n");
		goto garbage;		/* bad verifier, retry */
	}
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_GARBAGE_ARGS:
		break;			/* retry */
	default:
		printk("call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

garbage:
	dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry--) {
		printk("RPC: garbage, retrying %4d\n", task->tk_pid);
		task->tk_action = call_encode;
		return NULL;
	}
	printk("RPC: garbage, exit EIO\n");
	rpc_exit(task, -EIO);
	return NULL;
}
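
/*
 * For reference, the reply layout parsed above follows RFC 1057:
 * XID, REPLY, then either MSG_ACCEPTED (verifier plus accept status,
 * where only RPC_SUCCESS yields result data) or MSG_DENIED (rejection
 * status, possibly one of the RPC_AUTH_* errors handled above).
 */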