nfsd4: combine nfs4_rpc_args and nfsd4_cb_sequence
fs/nfsd/nfs4callback.c
blob 5687fce8564113b3a8355855fa7db295e39c6976
/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY        NFSDDBG_PROC

#define NFSPROC4_CB_NULL        0
#define NFSPROC4_CB_COMPOUND    1

/* Index of predefined Linux callback client operations */

enum {
        NFSPROC4_CLNT_CB_NULL = 0,
        NFSPROC4_CLNT_CB_RECALL,
        NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
        OP_CB_RECALL            = 4,
        OP_CB_SEQUENCE          = 11,
};
#define NFS4_MAXTAGLEN          20

#define NFS4_enc_cb_null_sz             0
#define NFS4_dec_cb_null_sz             0
#define cb_compound_enc_hdr_sz          4
#define cb_compound_dec_hdr_sz          (3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz                    (NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz              (sessionid_sz + 4 +             \
                                        1 /* no referring calls list yet */)
#define cb_sequence_dec_sz              (op_dec_sz + sessionid_sz + 4)

#define op_enc_sz                       1
#define op_dec_sz                       2
#define enc_nfs4_fh_sz                  (1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz                  (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz           (cb_compound_enc_hdr_sz +       \
                                        cb_sequence_enc_sz +            \
                                        1 + enc_stateid_sz +            \
                                        enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz           (cb_compound_dec_hdr_sz +       \
                                        cb_sequence_dec_sz +            \
                                        op_dec_sz)
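
/*
 * Note: the *_sz values above are XDR buffer sizes counted in 32-bit
 * words (XDR_QUADLEN units), as expected by the p_arglen/p_replen
 * fields of struct rpc_procinfo, not in bytes.
 */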
/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */

static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
        int tmp = XDR_QUADLEN(nbytes);
        if (!tmp)
                return p;
        p[tmp-1] = 0;
        memcpy(p, ptr, nbytes);
        return p + tmp;
}

#define WRITE32(n)               *p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)     do {                           \
        p = xdr_writemem(p, ptr, nbytes);                       \
} while (0)
#define RESERVE_SPACE(nbytes)   do {                            \
        p = xdr_reserve_space(xdr, nbytes);                     \
        if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
        BUG_ON(!p);                                             \
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL                             \
        status = 0;                             \
out:                                            \
        return status;                          \
xdr_error:                                      \
        dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
        status = -EIO;                          \
        goto out

#define READ32(x)         (x) = ntohl(*p++)
#define READ64(x)         do {                  \
        (x) = (u64)ntohl(*p++) << 32;           \
        (x) |= ntohl(*p++);                     \
} while (0)
#define READTIME(x)       do {                  \
        p++;                                    \
        (x.tv_sec) = ntohl(*p++);               \
        (x.tv_nsec) = ntohl(*p++);              \
} while (0)
#define READ_BUF(nbytes)  do { \
        p = xdr_inline_decode(xdr, nbytes); \
        if (!p) { \
                dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
                        __func__, __LINE__); \
                return -EIO; \
        } \
} while (0)
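
/*
 * Note that READ_BUF() returns -EIO straight out of the *calling*
 * function when the reply buffer runs short, so the decode helpers
 * below rely on it for all of their bounds checking.
 */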
struct nfs4_cb_compound_hdr {
        /* args */
        u32             ident;  /* minorversion 0 only */
        u32             nops;
        __be32          *nops_p;
        u32             minorversion;
        /* res */
        int             status;
};

static struct {
        int stat;
        int errno;
} nfs_cb_errtbl[] = {
        { NFS4_OK,              0            },
        { NFS4ERR_PERM,         EPERM        },
        { NFS4ERR_NOENT,        ENOENT       },
        { NFS4ERR_IO,           EIO          },
        { NFS4ERR_NXIO,         ENXIO        },
        { NFS4ERR_ACCESS,       EACCES       },
        { NFS4ERR_EXIST,        EEXIST       },
        { NFS4ERR_XDEV,         EXDEV        },
        { NFS4ERR_NOTDIR,       ENOTDIR      },
        { NFS4ERR_ISDIR,        EISDIR       },
        { NFS4ERR_INVAL,        EINVAL       },
        { NFS4ERR_FBIG,         EFBIG        },
        { NFS4ERR_NOSPC,        ENOSPC       },
        { NFS4ERR_ROFS,         EROFS        },
        { NFS4ERR_MLINK,        EMLINK       },
        { NFS4ERR_NAMETOOLONG,  ENAMETOOLONG },
        { NFS4ERR_NOTEMPTY,     ENOTEMPTY    },
        { NFS4ERR_DQUOT,        EDQUOT       },
        { NFS4ERR_STALE,        ESTALE       },
        { NFS4ERR_BADHANDLE,    EBADHANDLE   },
        { NFS4ERR_BAD_COOKIE,   EBADCOOKIE   },
        { NFS4ERR_NOTSUPP,      ENOTSUPP     },
        { NFS4ERR_TOOSMALL,     ETOOSMALL    },
        { NFS4ERR_SERVERFAULT,  ESERVERFAULT },
        { NFS4ERR_BADTYPE,      EBADTYPE     },
        { NFS4ERR_LOCKED,       EAGAIN       },
        { NFS4ERR_RESOURCE,     EREMOTEIO    },
        { NFS4ERR_SYMLINK,      ELOOP        },
        { NFS4ERR_OP_ILLEGAL,   EOPNOTSUPP   },
        { NFS4ERR_DEADLOCK,     EDEADLK      },
        { -1,                   EIO          }
};

static int
nfs_cb_stat_to_errno(int stat)
{
        int i;
        for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
                if (nfs_cb_errtbl[i].stat == stat)
                        return nfs_cb_errtbl[i].errno;
        }
        /* If we cannot translate the error, the recovery routines should
         * handle it.
         * Note: remaining NFSv4 error codes have values > 10000, so should
         * not conflict with native Linux error codes.
         */
        return stat;
}
/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
        __be32 *p;

        RESERVE_SPACE(sizeof(stateid_t));
        WRITE32(sid->si_generation);
        WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;

        RESERVE_SPACE(16);
        WRITE32(0);             /* tag length is always 0 */
        WRITE32(hdr->minorversion);
        WRITE32(hdr->ident);
        hdr->nops_p = p;
        WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
        *hdr->nops_p = htonl(hdr->nops);
}

static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
                 struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        int len = dp->dl_fh.fh_size;

        RESERVE_SPACE(4);
        WRITE32(OP_CB_RECALL);
        encode_stateid(xdr, &dp->dl_stateid);
        RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
        WRITE32(0); /* truncate optimization not implemented */
        WRITE32(len);
        WRITEMEM(&dp->dl_fh.fh_base, len);
        hdr->nops++;
}
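
/*
 * CB_SEQUENCE is only encoded for NFSv4.1 callbacks; for minorversion 0
 * the helper below is a no-op.  The backchannel currently uses a single
 * session slot, so the slotid and highest slotid are hardwired to 0 and
 * no referring call lists are sent.
 */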
static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfs4_rpc_args *args,
                   struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;

        if (hdr->minorversion == 0)
                return;

        RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

        WRITE32(OP_CB_SEQUENCE);
        WRITEMEM(args->args_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
        WRITE32(args->args_clp->cl_cb_seq_nr);
        WRITE32(0);             /* slotid, always 0 */
        WRITE32(0);             /* highest slotid always 0 */
        WRITE32(0);             /* cachethis always 0 */
        WRITE32(0);             /* FIXME: support referring_call_lists */
        hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
        struct xdr_stream xdrs, *xdr = &xdrs;

        xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
        RESERVE_SPACE(0);
        return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
                       struct nfs4_rpc_args *rpc_args)
{
        struct xdr_stream xdr;
        struct nfs4_delegation *args = rpc_args->args_op;
        struct nfs4_cb_compound_hdr hdr = {
                .ident = args->dl_ident,
                .minorversion = rpc_args->args_minorversion,
        };

        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_cb_compound_hdr(&xdr, &hdr);
        encode_cb_sequence(&xdr, rpc_args, &hdr);
        encode_cb_recall(&xdr, args, &hdr);
        encode_cb_nops(&hdr);
        return 0;
}
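
/*
 * Note: the compound status decoded into hdr->status below is not what
 * determines the return value of the cb_recall decoder; each operation's
 * status is checked individually in decode_cb_op_hdr().
 */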
static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        u32 taglen;

        READ_BUF(8);
        READ32(hdr->status);
        /* We've got no use for the tag; ignore it: */
        READ32(taglen);
        READ_BUF(taglen + 4);
        p += XDR_QUADLEN(taglen);
        READ32(hdr->nops);
        return 0;
}

static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
        __be32 *p;
        u32 op;
        int32_t nfserr;

        READ_BUF(8);
        READ32(op);
        if (op != expected) {
                dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
                        "operation %d but we issued a request for %d\n",
                        op, expected);
                return -EIO;
        }
        READ32(nfserr);
        if (nfserr != NFS_OK)
                return -nfs_cb_stat_to_errno(nfserr);
        return 0;
}
/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfs4_rpc_args *res,
                   struct rpc_rqst *rqstp)
{
        struct nfs4_sessionid id;
        int status;
        u32 dummy;
        __be32 *p;

        if (res->args_minorversion == 0)
                return 0;

        status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
        if (status)
                return status;

        /*
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
        status = -ESERVERFAULT;

        READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
        memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
        p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
        if (memcmp(id.data, res->args_clp->cl_sessionid.data,
                   NFS4_MAX_SESSIONID_LEN)) {
                dprintk("%s Invalid session id\n", __func__);
                goto out;
        }
        READ32(dummy);
        if (dummy != res->args_clp->cl_cb_seq_nr) {
                dprintk("%s Invalid sequence number\n", __func__);
                goto out;
        }
        READ32(dummy); /* slotid must be 0 */
        if (dummy != 0) {
                dprintk("%s Invalid slotid\n", __func__);
                goto out;
        }
        /* FIXME: process highest slotid and target highest slotid */
        status = 0;
out:
        return status;
}
static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
        return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
                       struct nfs4_rpc_args *args)
{
        struct xdr_stream xdr;
        struct nfs4_cb_compound_hdr hdr;
        int status;

        xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
        status = decode_cb_compound_hdr(&xdr, &hdr);
        if (status)
                goto out;
        if (args) {
                status = decode_cb_sequence(&xdr, args, rqstp);
                if (status)
                        goto out;
        }
        status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
        return status;
}
/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)                              \
[NFSPROC4_CLNT_##proc] = {                                              \
        .p_proc   = NFSPROC4_CB_##call,                                 \
        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
        .p_arglen = NFS4_##argtype##_sz,                                \
        .p_replen = NFS4_##restype##_sz,                                \
        .p_statidx = NFSPROC4_CB_##call,                                \
        .p_name   = #proc,                                              \
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
        PROC(CB_NULL,   NULL,     enc_cb_null,   dec_cb_null),
        PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall),
};
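
/*
 * Only CB_NULL and CB_RECALL appear as RPC procedures above; CB_SEQUENCE
 * is not a separate procedure but is prepended to the CB_RECALL compound
 * by encode_cb_sequence() when the backchannel is NFSv4.1.
 */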
static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
        .number                 = 1,
        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
        .procs                  = nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
        &nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
        .program                = &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
        .name                   = "nfs4_cb",
        .number                 = NFS4_CALLBACK,
        .nrvers                 = ARRAY_SIZE(nfs_cb_version),
        .version                = nfs_cb_version,
        .stats                  = &cb_stats,
        .pipe_dir_name          = "/nfsd4_cb",
};
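
/*
 * Callback requests time out after a tenth of the lease period (but at
 * least one second); the value returned below is in jiffies.
 */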
static int max_cb_time(void)
{
        return max(nfsd4_lease/10, (time_t)1) * HZ;
}

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        struct rpc_timeout      timeparms = {
                .to_initval     = max_cb_time(),
                .to_retries     = 0,
        };
        struct rpc_create_args args = {
                .net            = &init_net,
                .protocol       = XPRT_TRANSPORT_TCP,
                .address        = (struct sockaddr *) &conn->cb_addr,
                .addrsize       = conn->cb_addrlen,
                .timeout        = &timeparms,
                .program        = &cb_program,
                .prognumber     = conn->cb_prog,
                .version        = 0,
                .authflavor     = clp->cl_flavor,
                .flags          = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
                .client_name    = clp->cl_principal,
        };
        struct rpc_clnt *client;

        if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
                return -EINVAL;
        if (conn->cb_minorversion) {
                args.bc_xprt = conn->cb_xprt;
                args.protocol = XPRT_TRANSPORT_BC_TCP;
        }
        /* Create RPC client */
        client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
                return PTR_ERR(client);
        }
        nfsd4_set_callback_client(clp, client);
        return 0;
}
static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
        dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
                (int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_client *clp = calldata;

        if (task->tk_status)
                warn_no_callback_path(clp, task->tk_status);
        else
                atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
        .rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
        if (callback_cred)
                return 0;
        callback_cred = rpc_lookup_machine_cred();
        if (!callback_cred)
                return -ENOMEM;
        return 0;
}
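
/*
 * do_probe_callback() sends an asynchronous NULL call over the callback
 * channel; cl_cb_set is only set from nfsd4_cb_probe_done() once that
 * ping completes successfully.
 */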
void do_probe_callback(struct nfs4_client *clp)
{
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
                .rpc_argp       = clp,
                .rpc_cred       = callback_cred
        };
        int status;

        status = rpc_call_async(clp->cl_cb_client, &msg,
                                RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
                                &nfsd4_cb_probe_ops, (void *)clp);
        if (status)
                warn_no_callback_path(clp, status);
}

/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        int status;

        BUG_ON(atomic_read(&clp->cl_cb_set));

        status = setup_callback_client(clp, conn);
        if (status) {
                warn_no_callback_path(clp, status);
                return;
        }
        do_probe_callback(clp);
}
/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
                struct rpc_task *task)
{
        struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
        u32 *ptr = (u32 *)clp->cl_sessionid.data;
        int status = 0;

        dprintk("%s: %u:%u:%u:%u\n", __func__,
                ptr[0], ptr[1], ptr[2], ptr[3]);

        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
                dprintk("%s slot is busy\n", __func__);
                status = -EAGAIN;
                goto out;
        }

        /*
         * We'll need the clp during XDR encoding and decoding,
         * and the sequence during decoding to verify the reply
         */
        args->args_clp = clp;
        task->tk_msg.rpc_resp = args;

out:
        dprintk("%s status=%d\n", __func__, status);
        return status;
}
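
/*
 * Note: a task that got -EAGAIN above has been put to sleep on
 * cl_cb_waitq; nfsd4_cb_done() frees the slot and wakes the next
 * waiter once the current callback completes.
 */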
/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs4_delegation *dp = calldata;
        struct nfs4_client *clp = dp->dl_client;
        struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
        u32 minorversion = clp->cl_cb_conn.cb_minorversion;
        int status = 0;

        args->args_minorversion = minorversion;
        if (minorversion) {
                status = nfsd41_cb_setup_sequence(clp, task);
                if (status) {
                        if (status != -EAGAIN) {
                                /* terminate rpc task */
                                task->tk_status = status;
                                task->tk_action = NULL;
                        }
                        return;
                }
        }
        rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_delegation *dp = calldata;
        struct nfs4_client *clp = dp->dl_client;

        dprintk("%s: minorversion=%d\n", __func__,
                clp->cl_cb_conn.cb_minorversion);

        if (clp->cl_cb_conn.cb_minorversion) {
                /* No need for lock, access serialized in nfsd4_cb_prepare */
                ++clp->cl_cb_seq_nr;
                clear_bit(0, &clp->cl_cb_slot_busy);
                rpc_wake_up_next(&clp->cl_cb_waitq);
                dprintk("%s: freed slot, new seqid=%d\n", __func__,
                        clp->cl_cb_seq_nr);

                /* We're done looking into the sequence information */
                task->tk_msg.rpc_resp = NULL;
        }
}
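
/*
 * The recall completion handler below retries the recall once (see
 * dl_retries, set when the call is issued) after a two second delay;
 * if that also fails, the callback path is marked down via cl_cb_set.
 */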
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_delegation *dp = calldata;
        struct nfs4_client *clp = dp->dl_client;
        struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

        nfsd4_cb_done(task, calldata);

        if (current_rpc_client == NULL) {
                /* We're shutting down; give up. */
                /* XXX: err, or is it ok just to fall through
                 * and rpc_restart_call? */
                return;
        }

        switch (task->tk_status) {
        case 0:
                return;
        case -EBADHANDLE:
        case -NFS4ERR_BAD_STATEID:
                /* Race: client probably got cb_recall
                 * before open reply granting delegation */
                break;
        default:
                /* Network partition? */
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
                if (current_rpc_client != task->tk_client) {
                        /* queue a callback on the new connection: */
                        atomic_inc(&dp->dl_count);
                        nfsd4_cb_recall(dp);
                        return;
                }
        }
        if (dp->dl_retries--) {
                rpc_delay(task, 2*HZ);
                task->tk_status = 0;
                rpc_restart_call_prepare(task);
                return;
        } else {
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
        }
}

static void nfsd4_cb_recall_release(void *calldata)
{
        struct nfs4_delegation *dp = calldata;

        nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
        .rpc_call_prepare = nfsd4_cb_prepare,
        .rpc_call_done = nfsd4_cb_recall_done,
        .rpc_release = nfsd4_cb_recall_release,
};
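
/*
 * callback_wq is a single-threaded workqueue; nfsd4_set_callback_client()
 * relies on flush_workqueue() to ensure that no queued work item still
 * sees the old value of cl_cb_client before the old rpc client is shut
 * down.
 */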
static struct workqueue_struct *callback_wq;

int nfsd4_create_callback_queue(void)
{
        callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
        if (!callback_wq)
                return -ENOMEM;
        return 0;
}

void nfsd4_destroy_callback_queue(void)
{
        destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
{
        struct rpc_clnt *old = clp->cl_cb_client;

        clp->cl_cb_client = new;
        /*
         * After this, any work that saw the old value of cl_cb_client will
         * be gone:
         */
        flush_workqueue(callback_wq);
        /* So we can safely shut it down: */
        if (old)
                rpc_shutdown_client(old);
}
/*
 * called with dp->dl_count inc'ed.
 */
static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_client;
        struct rpc_clnt *clnt = clp->cl_cb_client;
        struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
                .rpc_cred = callback_cred
        };

        if (clnt == NULL) {
                nfs4_put_delegation(dp);
                return; /* Client is shutting down; give up. */
        }

        args->args_op = dp;
        msg.rpc_argp = args;
        dp->dl_retries = 1;
        rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &nfsd4_cb_recall_ops, dp);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
        /* XXX: for now, just send off delegation recall. */
        /* In future, generalize to handle any sort of callback. */
        struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
        struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);

        _nfsd4_cb_recall(dp);
}

void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
        queue_work(callback_wq, &dp->dl_recall.cb_work);
}