Import 2.1.118
[davej-history.git] / net / sunrpc / clnt.c
blob9380ff4a45f9df28db3610fcdba83c2a63a1f84d
1 /*
2 * linux/net/sunrpc/rpcclnt.c
4 * This file contains the high-level RPC interface.
5 * It is modeled as a finite state machine to support both synchronous
6 * and asynchronous requests.
8 * - RPC header generation and argument serialization.
9 * - Credential refresh.
10 * - TCP reconnect handling (when finished).
11 * - Retry of operation when it is suspected the operation failed because
12 * of uid squashing on the server, or when the credentials were stale
13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may have to be moved to the VFS layer.
16 * NB: BSD uses a more intelligent approach to guessing when a request
17 * or reply has been lost by keeping the RTO estimate for each procedure.
18 * We currently make do with a constant timeout value.
20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
24 #include <asm/system.h>
25 #include <asm/segment.h>
27 #include <linux/types.h>
28 #include <linux/mm.h>
29 #include <linux/malloc.h>
30 #include <linux/in.h>
32 #include <linux/sunrpc/clnt.h>
35 #define RPC_SLACK_SPACE 1024 /* total overkill */
37 #ifdef RPC_DEBUG
38 # define RPCDBG_FACILITY RPCDBG_CALL
39 #endif
41 static struct wait_queue * destroy_wait = NULL;
44 static void call_bind(struct rpc_task *task);
45 static void call_reserve(struct rpc_task *task);
46 static void call_reserveresult(struct rpc_task *task);
47 static void call_allocate(struct rpc_task *task);
48 static void call_encode(struct rpc_task *task);
49 static void call_decode(struct rpc_task *task);
50 static void call_transmit(struct rpc_task *task);
51 static void call_receive(struct rpc_task *task);
52 static void call_status(struct rpc_task *task);
53 static void call_refresh(struct rpc_task *task);
54 static void call_refreshresult(struct rpc_task *task);
55 static void call_timeout(struct rpc_task *task);
56 static void call_reconnect(struct rpc_task *task);
57 static u32 * call_header(struct rpc_task *task);
58 static u32 * call_verify(struct rpc_task *task);
61 * Create an RPC client
62 * FIXME: This should also take a flags argument (as in task->tk_flags).
63 * It's called (among others) from pmap_create_client, which may in
64 * turn be called by an async task. In this case, rpciod should not be
65 * made to sleep too long.
67 struct rpc_clnt *
68 rpc_create_client(struct rpc_xprt *xprt, char *servname,
69 struct rpc_program *program, u32 vers, int flavor)
71 struct rpc_version *version;
72 struct rpc_clnt *clnt = NULL;
74 dprintk("RPC: creating %s client for %s (xprt %p)\n",
75 program->name, servname, xprt);
77 if (!xprt)
78 goto out;
79 if (vers >= program->nrvers || !(version = program->version[vers]))
80 goto out;
82 clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
83 if (!clnt)
84 goto out_no_clnt;
85 memset(clnt, 0, sizeof(*clnt));
87 clnt->cl_xprt = xprt;
88 clnt->cl_procinfo = version->procs;
89 clnt->cl_maxproc = version->nrprocs;
90 clnt->cl_server = servname;
91 clnt->cl_protname = program->name;
92 clnt->cl_port = xprt->addr.sin_port;
93 clnt->cl_prog = program->number;
94 clnt->cl_vers = version->number;
95 clnt->cl_prot = IPPROTO_UDP;
96 clnt->cl_stats = program->stats;
97 clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");
99 if (!clnt->cl_port)
100 clnt->cl_autobind = 1;
102 if (!rpcauth_create(flavor, clnt))
103 goto out_no_auth;
104 out:
105 return clnt;
107 out_no_clnt:
108 printk("RPC: out of memory in rpc_create_client\n");
109 goto out;
110 out_no_auth:
111 printk("RPC: Couldn't create auth handle (flavor %d)\n",
112 flavor);
113 rpc_free(clnt);
114 clnt = NULL;
115 goto out;
119 * Properly shut down an RPC client, terminating all outstanding
120 * requests. Note that we must be certain that cl_oneshot and
121 * cl_dead are cleared, or else the client would be destroyed
122 * when the last task releases it.
125 rpc_shutdown_client(struct rpc_clnt *clnt)
127 dprintk("RPC: shutting down %s client for %s\n",
128 clnt->cl_protname, clnt->cl_server);
129 while (clnt->cl_users) {
130 #ifdef RPC_DEBUG
131 printk("rpc_shutdown_client: client %s, tasks=%d\n",
132 clnt->cl_protname, clnt->cl_users);
133 #endif
134 /* Don't let rpc_release_client destroy us */
135 clnt->cl_oneshot = 0;
136 clnt->cl_dead = 0;
137 rpc_killall_tasks(clnt);
138 sleep_on(&destroy_wait);
140 return rpc_destroy_client(clnt);
144 * Delete an RPC client
147 rpc_destroy_client(struct rpc_clnt *clnt)
149 dprintk("RPC: destroying %s client for %s\n",
150 clnt->cl_protname, clnt->cl_server);
152 if (clnt->cl_auth) {
153 rpcauth_destroy(clnt->cl_auth);
154 clnt->cl_auth = NULL;
156 if (clnt->cl_xprt) {
157 xprt_destroy(clnt->cl_xprt);
158 clnt->cl_xprt = NULL;
160 rpc_free(clnt);
161 return 0;
165 * Release an RPC client
167 void
168 rpc_release_client(struct rpc_clnt *clnt)
170 dprintk("RPC: rpc_release_client(%p, %d)\n",
171 clnt, clnt->cl_users);
172 if (clnt->cl_users) {
173 if (--(clnt->cl_users) > 0)
174 return;
175 } else
176 printk("rpc_release_client: %s client already free??\n",
177 clnt->cl_protname);
179 wake_up(&destroy_wait);
180 if (clnt->cl_oneshot || clnt->cl_dead)
181 rpc_destroy_client(clnt);
/*
 * Default callback for async RPC calls: just drop the task.
 */
static void
rpc_default_callback(struct rpc_task *task)
{
	rpc_release_task(task);
}
194 * Export the signal mask handling for aysnchronous code that
195 * sleeps on RPC calls
198 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
200 unsigned long sigallow = sigmask(SIGKILL);
201 unsigned long irqflags;
203 /* Turn off various signals */
204 if (clnt->cl_intr) {
205 struct k_sigaction *action = current->sig->action;
206 if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
207 sigallow |= sigmask(SIGINT);
208 if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
209 sigallow |= sigmask(SIGQUIT);
211 spin_lock_irqsave(&current->sigmask_lock, irqflags);
212 *oldset = current->blocked;
213 siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
214 recalc_sigpending(current);
215 spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
218 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
220 unsigned long irqflags;
222 spin_lock_irqsave(&current->sigmask_lock, irqflags);
223 current->blocked = *oldset;
224 recalc_sigpending(current);
225 spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
229 * New rpc_call implementation
232 rpc_do_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp,
233 int flags, rpc_action func, void *data)
235 struct rpc_task my_task, *task = &my_task;
236 sigset_t oldset;
237 int async, status;
239 /* If this client is slain all further I/O fails */
240 if (clnt->cl_dead)
241 return -EIO;
243 rpc_clnt_sigmask(clnt, &oldset);
245 /* Create/initialize a new RPC task */
246 if ((async = (flags & RPC_TASK_ASYNC)) != 0) {
247 if (!func)
248 func = rpc_default_callback;
249 status = -ENOMEM;
250 if (!(task = rpc_new_task(clnt, func, flags)))
251 goto out;
252 task->tk_calldata = data;
253 } else {
254 rpc_init_task(task, clnt, NULL, flags);
257 /* Bind the user cred, set up the call info struct and
258 * execute the task */
259 if (rpcauth_lookupcred(task) != NULL) {
260 rpc_call_setup(task, proc, argp, resp, 0);
261 rpc_execute(task);
262 } else
263 async = 0;
265 status = 0;
266 if (!async) {
267 status = task->tk_status;
268 rpc_release_task(task);
271 out:
272 rpc_clnt_sigunmask(clnt, &oldset);
274 return status;
278 void
279 rpc_call_setup(struct rpc_task *task, u32 proc,
280 void *argp, void *resp, int flags)
282 task->tk_action = call_bind;
283 task->tk_proc = proc;
284 task->tk_argp = argp;
285 task->tk_resp = resp;
286 task->tk_flags |= flags;
288 /* Increment call count */
289 rpcproc_count(task->tk_client, proc)++;
293 * Restart an (async) RPC call. Usually called from within the
294 * exit handler.
296 void
297 rpc_restart_call(struct rpc_task *task)
299 if (task->tk_flags & RPC_TASK_KILLED) {
300 rpc_release_task(task);
301 return;
303 task->tk_action = call_bind;
304 rpcproc_count(task->tk_client, task->tk_proc)++;
308 * 0. Get the server port number if not yet set
310 static void
311 call_bind(struct rpc_task *task)
313 struct rpc_clnt *clnt = task->tk_client;
315 task->tk_action = call_reserve;
316 task->tk_status = 0;
317 if (!clnt->cl_port)
318 rpc_getport(task, clnt);
322 * 1. Reserve an RPC call slot
324 static void
325 call_reserve(struct rpc_task *task)
327 struct rpc_clnt *clnt = task->tk_client;
329 dprintk("RPC: %4d call_reserve\n", task->tk_pid);
330 if (!clnt->cl_port) {
331 printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
332 clnt->cl_protname, clnt->cl_server,
333 clnt->cl_softrtry? "giving up" : "retrying");
334 if (!clnt->cl_softrtry) {
335 rpc_delay(task, 5*HZ);
336 return;
338 rpc_exit(task, -EIO);
339 return;
341 if (!rpcauth_uptodatecred(task)) {
342 task->tk_action = call_refresh;
343 return;
345 task->tk_action = call_reserveresult;
346 task->tk_timeout = clnt->cl_timeout.to_resrvval;
347 task->tk_status = 0;
348 clnt->cl_stats->rpccnt++;
349 xprt_reserve(task);
353 * 1b. Grok the result of xprt_reserve()
355 static void
356 call_reserveresult(struct rpc_task *task)
358 dprintk("RPC: %4d call_reserveresult (status %d)\n",
359 task->tk_pid, task->tk_status);
361 * After a call to xprt_reserve(), we must have either
362 * a request slot or else an error status.
364 if ((task->tk_status >= 0 && !task->tk_rqstp) ||
365 (task->tk_status < 0 && task->tk_rqstp))
366 printk("call_reserveresult: status=%d, request=%p??\n",
367 task->tk_status, task->tk_rqstp);
369 if (task->tk_status >= 0) {
370 task->tk_action = call_allocate;
371 goto out;
372 } else if (task->tk_status == -EAGAIN) {
373 task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
374 task->tk_status = 0;
375 xprt_reserve(task);
376 goto out;
377 } else if (task->tk_status == -ETIMEDOUT) {
378 printk("RPC: task timed out\n");
379 task->tk_action = call_timeout;
380 goto out;
381 } else {
382 task->tk_action = NULL;
384 if (!task->tk_rqstp) {
385 printk("RPC: task has no request, exit EIO\n");
386 rpc_exit(task, -EIO);
388 out:
389 return;
393 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
394 * (Note: buffer memory is freed in rpc_task_release).
396 static void
397 call_allocate(struct rpc_task *task)
399 struct rpc_clnt *clnt = task->tk_client;
400 unsigned int bufsiz;
402 dprintk("RPC: %4d call_allocate (status %d)\n",
403 task->tk_pid, task->tk_status);
404 task->tk_action = call_encode;
405 if (task->tk_buffer)
406 return;
408 /* FIXME: compute buffer requirements more exactly using
409 * auth->au_wslack */
410 bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;
412 if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
413 return;
414 printk("RPC: buffer allocation failed for task %p\n", task);
416 if (!signalled()) {
417 xprt_release(task);
418 task->tk_action = call_reserve;
419 rpc_delay(task, HZ);
420 return;
423 rpc_exit(task, -ERESTARTSYS);
427 * 3. Encode arguments of an RPC call
429 static void
430 call_encode(struct rpc_task *task)
432 struct rpc_clnt *clnt = task->tk_client;
433 struct rpc_rqst *req = task->tk_rqstp;
434 unsigned int bufsiz;
435 kxdrproc_t encode;
436 int status;
437 u32 *p;
439 dprintk("RPC: %4d call_encode (status %d)\n",
440 task->tk_pid, task->tk_status);
442 task->tk_action = call_transmit;
444 /* Default buffer setup */
445 bufsiz = rpcproc_bufsiz(clnt, task->tk_proc)+RPC_SLACK_SPACE;
446 req->rq_svec[0].iov_base = task->tk_buffer;
447 req->rq_svec[0].iov_len = bufsiz;
448 req->rq_slen = 0;
449 req->rq_snr = 1;
450 req->rq_rvec[0].iov_base = task->tk_buffer;
451 req->rq_rvec[0].iov_len = bufsiz;
452 req->rq_rlen = bufsiz;
453 req->rq_rnr = 1;
455 if (task->tk_proc > clnt->cl_maxproc) {
456 printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
457 clnt->cl_protname, clnt->cl_vers, task->tk_proc);
458 rpc_exit(task, -EIO);
459 return;
462 /* Encode header and provided arguments */
463 encode = rpcproc_encode(clnt, task->tk_proc);
464 if (!(p = call_header(task))) {
465 printk("RPC: call_header failed, exit EIO\n");
466 rpc_exit(task, -EIO);
467 } else
468 if ((status = encode(req, p, task->tk_argp)) < 0) {
469 printk(KERN_WARNING "%s: can't encode arguments: %d\n",
470 clnt->cl_protname, -status);
471 rpc_exit(task, status);
476 * 4. Transmit the RPC request
478 static void
479 call_transmit(struct rpc_task *task)
481 dprintk("RPC: %4d call_transmit (status %d)\n",
482 task->tk_pid, task->tk_status);
484 task->tk_action = call_receive;
485 task->tk_status = 0;
486 xprt_transmit(task);
490 * 5. Wait for the RPC reply
492 static void
493 call_receive(struct rpc_task *task)
495 dprintk("RPC: %4d call_receive (status %d)\n",
496 task->tk_pid, task->tk_status);
498 /* In case of error, evaluate status */
499 if (task->tk_status < 0) {
500 task->tk_action = call_status;
501 return;
504 /* If we have no decode function, this means we're performing
505 * a void call (a la lockd message passing). */
506 if (!rpcproc_decode(task->tk_client, task->tk_proc)) {
507 rpc_remove_wait_queue(task); /* remove from xprt_pending */
508 task->tk_action = NULL;
509 return;
512 task->tk_action = call_status;
513 xprt_receive(task);
/*
 * 6. Sort out the RPC call status and dispatch to the next state:
 * decode on success, timeout/reconnect/rebind/retransmit on the
 * recoverable errors, and task termination otherwise.
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req;
	int		status = task->tk_status;

	dprintk("RPC: %4d call_status (status %d)\n",
		task->tk_pid, task->tk_status);

	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	switch (status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -EAGAIN:
		/* Resume from whatever stage we lost along the way. */
		if (!(req = task->tk_rqstp))
			task->tk_action = call_reserve;
		else if (!task->tk_buffer)
			task->tk_action = call_allocate;
		else if (req->rq_damaged)
			task->tk_action = call_encode;
		else
			task->tk_action = call_transmit;
		break;
	case -ENOTCONN:
		task->tk_action = call_reconnect;
		break;
	case -ECONNREFUSED:
		if (clnt->cl_autobind) {
			/* Server may have moved: rebind the port. */
			task->tk_action = call_bind;
			clnt->cl_port = 0;
			break;
		}
		/* fall through */
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		task->tk_action = NULL;
		break;
	}
}
557 * 6a. Handle RPC timeout
558 * We do not release the request slot, so we keep using the
559 * same XID for all retransmits.
561 static void
562 call_timeout(struct rpc_task *task)
564 struct rpc_clnt *clnt = task->tk_client;
565 struct rpc_rqst *req = task->tk_rqstp;
567 if (req) {
568 struct rpc_timeout *to = &req->rq_timeout;
570 if (xprt_adjust_timeout(to)) {
571 dprintk("RPC: %4d call_timeout (minor timeo)\n",
572 task->tk_pid);
573 goto minor_timeout;
575 if ((to->to_initval <<= 1) > to->to_maxval)
576 to->to_initval = to->to_maxval;
579 dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
580 if (clnt->cl_softrtry) {
581 if (clnt->cl_chatty && !task->tk_exit)
582 printk("%s: server %s not responding, timed out\n",
583 clnt->cl_protname, clnt->cl_server);
584 rpc_exit(task, -EIO);
585 return;
587 if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
588 printk("%s: server %s not responding, still trying\n",
589 clnt->cl_protname, clnt->cl_server);
590 task->tk_flags |= RPC_CALL_MAJORSEEN;
592 if (clnt->cl_autobind)
593 clnt->cl_port = 0;
595 minor_timeout:
596 if (!clnt->cl_port) {
597 task->tk_action = call_bind;
598 } else if (!req) {
599 task->tk_action = call_reserve;
600 } else if (req->rq_damaged) {
601 task->tk_action = call_encode;
602 clnt->cl_stats->rpcretrans++;
603 } else {
604 task->tk_action = call_transmit;
605 clnt->cl_stats->rpcretrans++;
607 task->tk_status = 0;
611 * 6b. Reconnect to the RPC server (TCP case)
613 static void
614 call_reconnect(struct rpc_task *task)
616 dprintk("RPC: %4d call_reconnect status %d\n",
617 task->tk_pid, task->tk_status);
618 if (task->tk_status == 0) {
619 task->tk_action = call_status;
620 task->tk_status = -EAGAIN;
621 return;
623 task->tk_client->cl_stats->netreconn++;
624 xprt_reconnect(task);
628 * 7. Decode the RPC reply
630 static void
631 call_decode(struct rpc_task *task)
633 struct rpc_clnt *clnt = task->tk_client;
634 struct rpc_rqst *req = task->tk_rqstp;
635 kxdrproc_t decode = rpcproc_decode(clnt, task->tk_proc);
636 u32 *p;
638 dprintk("RPC: %4d call_decode (status %d)\n",
639 task->tk_pid, task->tk_status);
641 if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
642 printk("%s: server %s OK\n",
643 clnt->cl_protname, clnt->cl_server);
644 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
647 if (task->tk_status < 12) {
648 printk("%s: too small RPC reply size (%d bytes)\n",
649 clnt->cl_protname, task->tk_status);
650 rpc_exit(task, -EIO);
651 return;
654 /* Verify the RPC header */
655 if (!(p = call_verify(task)))
656 return;
659 * The following is an NFS-specific hack to cater for setuid
660 * processes whose uid is mapped to nobody on the server.
662 if (task->tk_client->cl_prog == 100003 &&
663 (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
664 if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
665 dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
666 task->tk_flags ^= RPC_CALL_REALUID;
667 task->tk_action = call_encode;
668 return;
672 task->tk_action = NULL;
673 task->tk_status = decode(req, p, task->tk_resp);
674 dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
675 task->tk_status);
679 * 8. Refresh the credentials if rejected by the server
681 static void
682 call_refresh(struct rpc_task *task)
684 dprintk("RPC: %4d call_refresh\n", task->tk_pid);
686 xprt_release(task); /* Must do to obtain new XID */
687 task->tk_action = call_refreshresult;
688 task->tk_status = 0;
689 task->tk_client->cl_stats->rpcauthrefresh++;
690 rpcauth_refreshcred(task);
694 * 8a. Process the results of a credential refresh
696 static void
697 call_refreshresult(struct rpc_task *task)
699 dprintk("RPC: %4d call_refreshresult (status %d)\n",
700 task->tk_pid, task->tk_status);
702 if (task->tk_status < 0) {
703 task->tk_status = -EACCES;
704 task->tk_action = NULL;
705 } else
706 task->tk_action = call_reserve;
710 * Call header serialization
712 static u32 *
713 call_header(struct rpc_task *task)
715 struct rpc_clnt *clnt = task->tk_client;
716 struct rpc_xprt *xprt = clnt->cl_xprt;
717 u32 *p = task->tk_buffer;
719 /* FIXME: check buffer size? */
720 if (xprt->stream)
721 *p++ = 0; /* fill in later */
722 *p++ = task->tk_rqstp->rq_xid; /* XID */
723 *p++ = htonl(RPC_CALL); /* CALL */
724 *p++ = htonl(RPC_VERSION); /* RPC version */
725 *p++ = htonl(clnt->cl_prog); /* program number */
726 *p++ = htonl(clnt->cl_vers); /* program version */
727 *p++ = htonl(task->tk_proc); /* procedure */
728 return rpcauth_marshcred(task, p);
732 * Reply header verification
734 static u32 *
735 call_verify(struct rpc_task *task)
737 u32 *p = task->tk_buffer, n;
739 p += 1; /* skip XID */
741 if ((n = ntohl(*p++)) != RPC_REPLY) {
742 printk("call_verify: not an RPC reply: %x\n", n);
743 goto garbage;
745 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
746 int error = -EACCES;
748 if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
749 printk("call_verify: RPC call rejected: %x\n", n);
750 } else
751 switch ((n = ntohl(*p++))) {
752 case RPC_AUTH_REJECTEDCRED:
753 case RPC_AUTH_REJECTEDVERF:
754 if (!task->tk_cred_retry--)
755 break;
756 dprintk("RPC: %4d call_verify: retry stale creds\n",
757 task->tk_pid);
758 rpcauth_invalcred(task);
759 task->tk_action = call_refresh;
760 return NULL;
761 case RPC_AUTH_BADCRED:
762 case RPC_AUTH_BADVERF:
763 /* possibly garbled cred/verf? */
764 if (!task->tk_garb_retry--)
765 break;
766 dprintk("RPC: %4d call_verify: retry garbled creds\n",
767 task->tk_pid);
768 task->tk_action = call_encode;
769 return NULL;
770 case RPC_AUTH_TOOWEAK:
771 printk("call_verify: server requires stronger "
772 "authentication.\n");
773 default:
774 printk("call_verify: unknown auth error: %x\n", n);
775 error = -EIO;
777 dprintk("RPC: %4d call_verify: call rejected %d\n",
778 task->tk_pid, n);
779 rpc_exit(task, error);
780 return NULL;
782 if (!(p = rpcauth_checkverf(task, p))) {
783 printk("call_verify: auth check failed\n");
784 goto garbage; /* bad verifier, retry */
786 switch ((n = ntohl(*p++))) {
787 case RPC_SUCCESS:
788 return p;
789 case RPC_GARBAGE_ARGS:
790 break; /* retry */
791 default:
792 printk("call_verify: server accept status: %x\n", n);
793 /* Also retry */
796 garbage:
797 dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
798 task->tk_client->cl_stats->rpcgarbage++;
799 if (task->tk_garb_retry--) {
800 printk("RPC: garbage, retrying %4d\n", task->tk_pid);
801 task->tk_action = call_encode;
802 return NULL;
804 printk("RPC: garbage, exit EIO\n");
805 rpc_exit(task, -EIO);
806 return NULL;