nfsd4: nfsd4_destroy_session must set callback client under the state lock
fs/nfsd/nfs4callback.c (blob 77bc9d3c80fde989c166725b04247c06b8f26e48)
/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
#define NFS4_STATEID_SIZE 16

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
	OP_CB_RECALL		= 4,
	OP_CB_SEQUENCE		= 11,
};

#define NFS4_MAXTAGLEN		20

#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +	\
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +	\
					cb_sequence_enc_sz +		\
					1 + enc_stateid_sz +		\
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +	\
					cb_sequence_dec_sz +		\
					op_dec_sz)

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);

	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}

#define WRITE32(n)		*p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)	do {				\
	p = xdr_writemem(p, ptr, nbytes);			\
} while (0)
#define RESERVE_SPACE(nbytes)	do {				\
	p = xdr_reserve_space(xdr, nbytes);			\
	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
	BUG_ON(!p);						\
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL				\
	status = 0;				\
out:						\
	return status;				\
xdr_error:					\
	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO;				\
	goto out

#define READ32(x)	(x) = ntohl(*p++)
#define READ64(x)	do {			\
	(x) = (u64)ntohl(*p++) << 32;		\
	(x) |= ntohl(*p++);			\
} while (0)
#define READTIME(x)	do {			\
	p++;					\
	(x.tv_sec) = ntohl(*p++);		\
	(x.tv_nsec) = ntohl(*p++);		\
} while (0)
#define READ_BUF(nbytes)	do { \
	p = xdr_inline_decode(xdr, nbytes); \
	if (!p) { \
		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
			__func__, __LINE__); \
		return -EIO; \
	} \
} while (0)

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
	u32		taglen;
	char		*tag;
};

static struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK, 0 },
	{ NFS4ERR_PERM, EPERM },
	{ NFS4ERR_NOENT, ENOENT },
	{ NFS4ERR_IO, EIO },
	{ NFS4ERR_NXIO, ENXIO },
	{ NFS4ERR_ACCESS, EACCES },
	{ NFS4ERR_EXIST, EEXIST },
	{ NFS4ERR_XDEV, EXDEV },
	{ NFS4ERR_NOTDIR, ENOTDIR },
	{ NFS4ERR_ISDIR, EISDIR },
	{ NFS4ERR_INVAL, EINVAL },
	{ NFS4ERR_FBIG, EFBIG },
	{ NFS4ERR_NOSPC, ENOSPC },
	{ NFS4ERR_ROFS, EROFS },
	{ NFS4ERR_MLINK, EMLINK },
	{ NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
	{ NFS4ERR_NOTEMPTY, ENOTEMPTY },
	{ NFS4ERR_DQUOT, EDQUOT },
	{ NFS4ERR_STALE, ESTALE },
	{ NFS4ERR_BADHANDLE, EBADHANDLE },
	{ NFS4ERR_BAD_COOKIE, EBADCOOKIE },
	{ NFS4ERR_NOTSUPP, ENOTSUPP },
	{ NFS4ERR_TOOSMALL, ETOOSMALL },
	{ NFS4ERR_SERVERFAULT, ESERVERFAULT },
	{ NFS4ERR_BADTYPE, EBADTYPE },
	{ NFS4ERR_LOCKED, EAGAIN },
	{ NFS4ERR_RESOURCE, EREMOTEIO },
	{ NFS4ERR_SYMLINK, ELOOP },
	{ NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
	{ NFS4ERR_DEADLOCK, EDEADLK },
	{ -1, EIO }
};

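/*
 * Map an NFS4ERR_* callback status onto a local errno; statuses we do not
 * recognize are passed through unchanged (see the note below).
 */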
static int
nfs_cb_stat_to_errno(int stat)
{
	int i;
	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == stat)
			return nfs_cb_errtbl[i].errno;
	}
	/* If we cannot translate the error, the recovery routines should
	 * handle it.
	 * Note: remaining NFSv4 error codes have values > 10000, so should
	 * not conflict with native Linux error codes.
	 */
	return stat;
}

/*
 * XDR encode
 */
static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	RESERVE_SPACE(16);
	WRITE32(0);		/* tag length is always 0 */
	WRITE32(hdr->minorversion);
	WRITE32(hdr->ident);
	hdr->nops_p = p;
	WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);
}

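/*
 * Encode a single CB_RECALL operation: stateid, truncate flag (always 0
 * here) and the delegation's filehandle, then bump the compound op count.
 */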
static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
		struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	int len = dp->dl_fh.fh_size;

	RESERVE_SPACE(12+sizeof(dp->dl_stateid) + len);
	WRITE32(OP_CB_RECALL);
	WRITE32(dp->dl_stateid.si_generation);
	WRITEMEM(&dp->dl_stateid.si_opaque, sizeof(stateid_opaque_t));
	WRITE32(0); /* truncate optimization not implemented */
	WRITE32(len);
	WRITEMEM(&dp->dl_fh.fh_base, len);
	hdr->nops++;
}

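/*
 * Encode CB_SEQUENCE for an NFSv4.1 backchannel compound; skipped entirely
 * for minorversion 0, and always uses slot 0 of the single-slot table.
 */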
static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
		   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	if (hdr->minorversion == 0)
		return;

	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

	WRITE32(OP_CB_SEQUENCE);
	WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
	WRITE32(args->cbs_clp->cl_cb_seq_nr);
	WRITE32(0);		/* slotid, always 0 */
	WRITE32(0);		/* highest slotid always 0 */
	WRITE32(0);		/* cachethis always 0 */
	WRITE32(0); /* FIXME: support referring_call_lists */
	hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
	struct xdr_stream xdrs, *xdr = &xdrs;

	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
	RESERVE_SPACE(0);
	return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
		struct nfs4_rpc_args *rpc_args)
{
	struct xdr_stream xdr;
	struct nfs4_delegation *args = rpc_args->args_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = args->dl_ident,
		.minorversion = rpc_args->args_seq.cbs_minorversion,
	};

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	encode_cb_compound_hdr(&xdr, &hdr);
	encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
	encode_cb_recall(&xdr, args, &hdr);
	encode_cb_nops(&hdr);
	return 0;
}

static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	READ_BUF(8);
	READ32(hdr->status);
	READ32(hdr->taglen);
	READ_BUF(hdr->taglen + 4);
	hdr->tag = (char *)p;
	p += XDR_QUADLEN(hdr->taglen);
	READ32(hdr->nops);
	return 0;
}

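/*
 * Decode a per-operation header: check that the opcode matches the one we
 * sent, and translate a non-OK status into a negative errno.
 */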
static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	u32 op;
	int32_t nfserr;

	READ_BUF(8);
	READ32(op);
	if (op != expected) {
		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
			"operation %d but we issued a request for %d\n",
			op, expected);
		return -EIO;
	}
	READ32(nfserr);
	if (nfserr != NFS_OK)
		return -nfs_cb_stat_to_errno(nfserr);
	return 0;
}

/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
		   struct rpc_rqst *rqstp)
{
	struct nfs4_sessionid id;
	int status;
	u32 dummy;
	__be32 *p;

	if (res->cbs_minorversion == 0)
		return 0;

	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
	if (status)
		return status;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	status = -ESERVERFAULT;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, res->cbs_clp->cl_sessionid.data,
		   NFS4_MAX_SESSIONID_LEN)) {
		dprintk("%s Invalid session id\n", __func__);
		goto out;
	}
	READ32(dummy);
	if (dummy != res->cbs_clp->cl_cb_seq_nr) {
		dprintk("%s Invalid sequence number\n", __func__);
		goto out;
	}
	READ32(dummy); /* slotid must be 0 */
	if (dummy != 0) {
		dprintk("%s Invalid slotid\n", __func__);
		goto out;
	}
	/* FIXME: process highest slotid and target highest slotid */
	status = 0;
out:
	return status;
}

static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
	return 0;
}

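/*
 * Decode a CB_RECALL reply: compound header, then (when a sequence was sent)
 * CB_SEQUENCE, then the CB_RECALL op status itself.
 */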
static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_cb_sequence *seq)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	if (seq) {
		status = decode_cb_sequence(&xdr, seq, rqstp);
		if (status)
			goto out;
	}
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)			\
[NFSPROC4_CLNT_##proc] = {					\
	.p_proc = NFSPROC4_CB_##call,				\
	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,		\
	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,		\
	.p_arglen = NFS4_##argtype##_sz,			\
	.p_replen = NFS4_##restype##_sz,			\
	.p_statidx = NFSPROC4_CB_##call,			\
	.p_name = #proc,					\
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
};

static struct rpc_version nfs_cb_version4 = {
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
	NULL,
	&nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "/nfsd4_cb",
};

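/* Callback RPC timeout: a tenth of the lease period, but at least one second. */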
static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */
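/*
 * Create the rpc_clnt for this client's callback path. For NFSv4.1 the
 * backchannel rides on the connection the client already established
 * (XPRT_TRANSPORT_BC_TCP via cb_xprt); for v4.0 a separate TCP connection
 * is made to the client's advertised callback address.
 */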
int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= (struct sockaddr *) &cb->cb_addr,
		.addrsize	= cb->cb_addrlen,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.prognumber	= cb->cb_prog,
		.version	= nfs_cb_version[1]->number,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
		.client_name	= clp->cl_principal,
	};
	struct rpc_clnt *client;

	if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
		return -EINVAL;
	if (cb->cb_minorversion) {
		args.bc_xprt = cb->cb_xprt;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	nfsd4_set_callback_client(clp, client);
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = calldata;

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred();
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}

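/*
 * Send an asynchronous CB_NULL probe down the callback channel;
 * nfsd4_cb_probe_done() sets cl_cb_set on success or logs a warning if the
 * callback path is unusable.
 */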
void do_probe_callback(struct nfs4_client *clp)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
		.rpc_argp	= clp,
		.rpc_cred	= callback_cred
	};
	int status;

	status = rpc_call_async(clp->cl_cb_client, &msg,
				RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
				&nfsd4_cb_probe_ops, (void *)clp);
	if (status)
		warn_no_callback_path(clp, status);
}

/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
	int status;

	BUG_ON(atomic_read(&clp->cl_cb_set));

	status = setup_callback_client(clp, cb);
	if (status) {
		warn_no_callback_path(clp, status);
		return;
	}
	do_probe_callback(clp);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy. Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
		struct rpc_task *task)
{
	struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
	u32 *ptr = (u32 *)clp->cl_sessionid.data;
	int status = 0;

	dprintk("%s: %u:%u:%u:%u\n", __func__,
		ptr[0], ptr[1], ptr[2], ptr[3]);

	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		status = -EAGAIN;
		goto out;
	}

	/*
	 * We'll need the clp during XDR encoding and decoding,
	 * and the sequence during decoding to verify the reply
	 */
	args->args_seq.cbs_clp = clp;
	task->tk_msg.rpc_resp = &args->args_seq;

out:
	dprintk("%s status=%d\n", __func__, status);
	return status;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;
	struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
	u32 minorversion = clp->cl_cb_conn.cb_minorversion;
	int status = 0;

	args->args_seq.cbs_minorversion = minorversion;
	if (minorversion) {
		status = nfsd41_cb_setup_sequence(clp, task);
		if (status) {
			if (status != -EAGAIN) {
				/* terminate rpc task */
				task->tk_status = status;
				task->tk_action = NULL;
			}
			return;
		}
	}
	rpc_call_start(task);
}

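/*
 * Completion work common to all callbacks: for NFSv4.1, advance the
 * backchannel sequence number and release the single slot so the next
 * waiting callback can run.
 */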
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_cb_conn.cb_minorversion);

	if (clp->cl_cb_conn.cb_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}

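/*
 * CB_RECALL completion. -EIO suggests the callback path is down: mark it so
 * and, if a new rpc_clnt has since been installed, re-queue the recall on
 * it. Otherwise retry a limited number of times (dl_retries) after a short
 * delay before declaring the callback path unusable.
 */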
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case -EIO:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			nfsd4_cb_recall(dp);
			return;
		}
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* success, or error we can't handle */
		return;
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfs4_delegation *dp = calldata;

	nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

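/*
 * All callback RPCs are dispatched from this single-threaded workqueue, so
 * nfsd4_set_callback_client() can flush it to wait out any work still using
 * an old rpc_clnt.
 */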
static struct workqueue_struct *callback_wq;

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
{
	struct rpc_clnt *old = clp->cl_cb_client;

	clp->cl_cb_client = new;
	/*
	 * After this, any work that saw the old value of cl_cb_client will
	 * be gone:
	 */
	flush_workqueue(callback_wq);
	/* So we can safely shut it down: */
	if (old)
		rpc_shutdown_client(old);
}

/*
 * called with dp->dl_count inc'ed.
 */
static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *clnt = clp->cl_cb_client;
	struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
		.rpc_cred = callback_cred
	};
	int status;

	if (clnt == NULL)
		return; /* Client is shutting down; give up. */

	args->args_op = dp;
	msg.rpc_argp = args;
	dp->dl_retries = 1;
	status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
				&nfsd4_cb_recall_ops, dp);
	if (status)
		nfs4_put_delegation(dp);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
	/* XXX: for now, just send off delegation recall. */
	/* In future, generalize to handle any sort of callback. */
	struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);

	_nfsd4_cb_recall(dp);
}

void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	queue_work(callback_wq, &dp->dl_recall.cb_work);
}