/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
8 #include <linux/nfs4.h>
9 #include <linux/nfs_fs.h>
10 #include <linux/slab.h>
13 #include "delegation.h"
18 #define NFSDBG_FACILITY NFSDBG_CALLBACK
21 __be32
nfs4_callback_getattr(struct cb_getattrargs
*args
,
22 struct cb_getattrres
*res
,
23 struct cb_process_state
*cps
)
25 struct nfs_delegation
*delegation
;
26 struct nfs_inode
*nfsi
;
29 res
->status
= htonl(NFS4ERR_OP_NOT_IN_SESSION
);
30 if (!cps
->clp
) /* Always set for v4.0. Set in cb_sequence for v4.1 */
33 res
->bitmap
[0] = res
->bitmap
[1] = 0;
34 res
->status
= htonl(NFS4ERR_BADHANDLE
);
36 dprintk("NFS: GETATTR callback request from %s\n",
37 rpc_peeraddr2str(cps
->clp
->cl_rpcclient
, RPC_DISPLAY_ADDR
));
39 inode
= nfs_delegation_find_inode(cps
->clp
, &args
->fh
);
44 delegation
= rcu_dereference(nfsi
->delegation
);
45 if (delegation
== NULL
|| (delegation
->type
& FMODE_WRITE
) == 0)
47 res
->size
= i_size_read(inode
);
48 res
->change_attr
= delegation
->change_attr
;
49 if (nfsi
->npages
!= 0)
51 res
->ctime
= inode
->i_ctime
;
52 res
->mtime
= inode
->i_mtime
;
53 res
->bitmap
[0] = (FATTR4_WORD0_CHANGE
|FATTR4_WORD0_SIZE
) &
55 res
->bitmap
[1] = (FATTR4_WORD1_TIME_METADATA
|FATTR4_WORD1_TIME_MODIFY
) &
62 dprintk("%s: exit with status = %d\n", __func__
, ntohl(res
->status
));
66 __be32
nfs4_callback_recall(struct cb_recallargs
*args
, void *dummy
,
67 struct cb_process_state
*cps
)
72 res
= htonl(NFS4ERR_OP_NOT_IN_SESSION
);
73 if (!cps
->clp
) /* Always set for v4.0. Set in cb_sequence for v4.1 */
76 dprintk("NFS: RECALL callback request from %s\n",
77 rpc_peeraddr2str(cps
->clp
->cl_rpcclient
, RPC_DISPLAY_ADDR
));
79 res
= htonl(NFS4ERR_BADHANDLE
);
80 inode
= nfs_delegation_find_inode(cps
->clp
, &args
->fh
);
83 /* Set up a helper thread to actually return the delegation */
84 switch (nfs_async_inode_return_delegation(inode
, &args
->stateid
)) {
90 res
= htonl(NFS4ERR_BAD_STATEID
);
93 res
= htonl(NFS4ERR_RESOURCE
);
97 dprintk("%s: exit with status = %d\n", __func__
, ntohl(res
));
101 int nfs4_validate_delegation_stateid(struct nfs_delegation
*delegation
, const nfs4_stateid
*stateid
)
103 if (delegation
== NULL
|| memcmp(delegation
->stateid
.data
, stateid
->data
,
104 sizeof(delegation
->stateid
.data
)) != 0)
109 #if defined(CONFIG_NFS_V4_1)
111 static u32
initiate_file_draining(struct nfs_client
*clp
,
112 struct cb_layoutrecallargs
*args
)
114 struct pnfs_layout_hdr
*lo
;
117 u32 rv
= NFS4ERR_NOMATCHING_LAYOUT
;
118 LIST_HEAD(free_me_list
);
120 spin_lock(&clp
->cl_lock
);
121 list_for_each_entry(lo
, &clp
->cl_layouts
, plh_layouts
) {
122 if (nfs_compare_fh(&args
->cbl_fh
,
123 &NFS_I(lo
->plh_inode
)->fh
))
125 ino
= igrab(lo
->plh_inode
);
129 /* Without this, layout can be freed as soon
130 * as we release cl_lock.
135 spin_unlock(&clp
->cl_lock
);
137 return NFS4ERR_NOMATCHING_LAYOUT
;
139 spin_lock(&ino
->i_lock
);
140 if (test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
) ||
141 mark_matching_lsegs_invalid(lo
, &free_me_list
,
145 rv
= NFS4ERR_NOMATCHING_LAYOUT
;
146 pnfs_set_layout_stateid(lo
, &args
->cbl_stateid
, true);
147 spin_unlock(&ino
->i_lock
);
148 pnfs_free_lseg_list(&free_me_list
);
154 static u32
initiate_bulk_draining(struct nfs_client
*clp
,
155 struct cb_layoutrecallargs
*args
)
157 struct pnfs_layout_hdr
*lo
;
159 u32 rv
= NFS4ERR_NOMATCHING_LAYOUT
;
160 struct pnfs_layout_hdr
*tmp
;
161 LIST_HEAD(recall_list
);
162 LIST_HEAD(free_me_list
);
163 struct pnfs_layout_range range
= {
164 .iomode
= IOMODE_ANY
,
166 .length
= NFS4_MAX_UINT64
,
169 spin_lock(&clp
->cl_lock
);
170 list_for_each_entry(lo
, &clp
->cl_layouts
, plh_layouts
) {
171 if ((args
->cbl_recall_type
== RETURN_FSID
) &&
172 memcmp(&NFS_SERVER(lo
->plh_inode
)->fsid
,
173 &args
->cbl_fsid
, sizeof(struct nfs_fsid
)))
175 if (!igrab(lo
->plh_inode
))
178 BUG_ON(!list_empty(&lo
->plh_bulk_recall
));
179 list_add(&lo
->plh_bulk_recall
, &recall_list
);
181 spin_unlock(&clp
->cl_lock
);
182 list_for_each_entry_safe(lo
, tmp
,
183 &recall_list
, plh_bulk_recall
) {
185 spin_lock(&ino
->i_lock
);
186 set_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
187 if (mark_matching_lsegs_invalid(lo
, &free_me_list
, &range
))
189 list_del_init(&lo
->plh_bulk_recall
);
190 spin_unlock(&ino
->i_lock
);
191 pnfs_free_lseg_list(&free_me_list
);
198 static u32
do_callback_layoutrecall(struct nfs_client
*clp
,
199 struct cb_layoutrecallargs
*args
)
201 u32 res
= NFS4ERR_DELAY
;
203 dprintk("%s enter, type=%i\n", __func__
, args
->cbl_recall_type
);
204 if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL
, &clp
->cl_state
))
206 if (args
->cbl_recall_type
== RETURN_FILE
)
207 res
= initiate_file_draining(clp
, args
);
209 res
= initiate_bulk_draining(clp
, args
);
210 clear_bit(NFS4CLNT_LAYOUTRECALL
, &clp
->cl_state
);
212 dprintk("%s returning %i\n", __func__
, res
);
217 __be32
nfs4_callback_layoutrecall(struct cb_layoutrecallargs
*args
,
218 void *dummy
, struct cb_process_state
*cps
)
222 dprintk("%s: -->\n", __func__
);
225 res
= do_callback_layoutrecall(cps
->clp
, args
);
227 res
= NFS4ERR_OP_NOT_IN_SESSION
;
229 dprintk("%s: exit with status = %d\n", __func__
, res
);
230 return cpu_to_be32(res
);
233 static void pnfs_recall_all_layouts(struct nfs_client
*clp
)
235 struct cb_layoutrecallargs args
;
237 /* Pretend we got a CB_LAYOUTRECALL(ALL) */
238 memset(&args
, 0, sizeof(args
));
239 args
.cbl_recall_type
= RETURN_ALL
;
240 /* FIXME we ignore errors, what should we do? */
241 do_callback_layoutrecall(clp
, &args
);
244 __be32
nfs4_callback_devicenotify(struct cb_devicenotifyargs
*args
,
245 void *dummy
, struct cb_process_state
*cps
)
249 struct nfs_client
*clp
= cps
->clp
;
250 struct nfs_server
*server
= NULL
;
252 dprintk("%s: -->\n", __func__
);
255 res
= cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION
);
259 for (i
= 0; i
< args
->ndevs
; i
++) {
260 struct cb_devicenotifyitem
*dev
= &args
->devs
[i
];
263 server
->pnfs_curr_ld
->id
!= dev
->cbd_layout_type
) {
265 list_for_each_entry_rcu(server
, &clp
->cl_superblocks
, client_link
)
266 if (server
->pnfs_curr_ld
&&
267 server
->pnfs_curr_ld
->id
== dev
->cbd_layout_type
) {
272 dprintk("%s: layout type %u not found\n",
273 __func__
, dev
->cbd_layout_type
);
278 if (dev
->cbd_notify_type
== NOTIFY_DEVICEID4_CHANGE
)
279 dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
280 "deleting instead\n", __func__
);
281 nfs4_delete_deviceid(server
->pnfs_curr_ld
, clp
, &dev
->cbd_dev_id
);
286 dprintk("%s: exit with status = %u\n",
287 __func__
, be32_to_cpu(res
));
291 int nfs41_validate_delegation_stateid(struct nfs_delegation
*delegation
, const nfs4_stateid
*stateid
)
293 if (delegation
== NULL
)
296 if (stateid
->stateid
.seqid
!= 0)
298 if (memcmp(&delegation
->stateid
.stateid
.other
,
299 &stateid
->stateid
.other
,
300 NFS4_STATEID_OTHER_SIZE
))
307 * Validate the sequenceID sent by the server.
308 * Return success if the sequenceID is one more than what we last saw on
309 * this slot, accounting for wraparound. Increments the slot's sequence.
311 * We don't yet implement a duplicate request cache, instead we set the
312 * back channel ca_maxresponsesize_cached to zero. This is OK for now
313 * since we only currently implement idempotent callbacks anyway.
315 * We have a single slot backchannel at this time, so we don't bother
316 * checking the used_slots bit array on the table. The lower layer guarantees
317 * a single outstanding callback request at a time.
320 validate_seqid(struct nfs4_slot_table
*tbl
, struct cb_sequenceargs
* args
)
322 struct nfs4_slot
*slot
;
324 dprintk("%s enter. slotid %d seqid %d\n",
325 __func__
, args
->csa_slotid
, args
->csa_sequenceid
);
327 if (args
->csa_slotid
> NFS41_BC_MAX_CALLBACKS
)
328 return htonl(NFS4ERR_BADSLOT
);
330 slot
= tbl
->slots
+ args
->csa_slotid
;
331 dprintk("%s slot table seqid: %d\n", __func__
, slot
->seq_nr
);
334 if (likely(args
->csa_sequenceid
== slot
->seq_nr
+ 1)) {
340 if (args
->csa_sequenceid
== slot
->seq_nr
) {
341 dprintk("%s seqid %d is a replay\n",
342 __func__
, args
->csa_sequenceid
);
343 /* Signal process_op to set this error on next op */
344 if (args
->csa_cachethis
== 0)
345 return htonl(NFS4ERR_RETRY_UNCACHED_REP
);
347 /* The ca_maxresponsesize_cached is 0 with no DRC */
348 else if (args
->csa_cachethis
== 1)
349 return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE
);
353 if (args
->csa_sequenceid
== 1 && (slot
->seq_nr
+ 1) == 0) {
358 /* Misordered request */
359 return htonl(NFS4ERR_SEQ_MISORDERED
);
361 tbl
->highest_used_slotid
= args
->csa_slotid
;
362 return htonl(NFS4_OK
);
366 * For each referring call triple, check the session's slot table for
367 * a match. If the slot is in use and the sequence numbers match, the
368 * client is still waiting for a response to the original request.
370 static bool referring_call_exists(struct nfs_client
*clp
,
372 struct referring_call_list
*rclists
)
376 struct nfs4_session
*session
;
377 struct nfs4_slot_table
*tbl
;
378 struct referring_call_list
*rclist
;
379 struct referring_call
*ref
;
382 * XXX When client trunking is implemented, this becomes
383 * a session lookup from within the loop
385 session
= clp
->cl_session
;
386 tbl
= &session
->fc_slot_table
;
388 for (i
= 0; i
< nrclists
; i
++) {
389 rclist
= &rclists
[i
];
390 if (memcmp(session
->sess_id
.data
,
391 rclist
->rcl_sessionid
.data
,
392 NFS4_MAX_SESSIONID_LEN
) != 0)
395 for (j
= 0; j
< rclist
->rcl_nrefcalls
; j
++) {
396 ref
= &rclist
->rcl_refcalls
[j
];
398 dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
399 "slotid %u\n", __func__
,
400 ((u32
*)&rclist
->rcl_sessionid
.data
)[0],
401 ((u32
*)&rclist
->rcl_sessionid
.data
)[1],
402 ((u32
*)&rclist
->rcl_sessionid
.data
)[2],
403 ((u32
*)&rclist
->rcl_sessionid
.data
)[3],
404 ref
->rc_sequenceid
, ref
->rc_slotid
);
406 spin_lock(&tbl
->slot_tbl_lock
);
407 status
= (test_bit(ref
->rc_slotid
, tbl
->used_slots
) &&
408 tbl
->slots
[ref
->rc_slotid
].seq_nr
==
410 spin_unlock(&tbl
->slot_tbl_lock
);
420 __be32
nfs4_callback_sequence(struct cb_sequenceargs
*args
,
421 struct cb_sequenceres
*res
,
422 struct cb_process_state
*cps
)
424 struct nfs4_slot_table
*tbl
;
425 struct nfs_client
*clp
;
427 __be32 status
= htonl(NFS4ERR_BADSESSION
);
429 clp
= nfs4_find_client_sessionid(args
->csa_addr
, &args
->csa_sessionid
);
433 tbl
= &clp
->cl_session
->bc_slot_table
;
435 spin_lock(&tbl
->slot_tbl_lock
);
436 /* state manager is resetting the session */
437 if (test_bit(NFS4_SESSION_DRAINING
, &clp
->cl_session
->session_state
)) {
438 spin_unlock(&tbl
->slot_tbl_lock
);
439 status
= htonl(NFS4ERR_DELAY
);
440 /* Return NFS4ERR_BADSESSION if we're draining the session
441 * in order to reset it.
443 if (test_bit(NFS4CLNT_SESSION_RESET
, &clp
->cl_state
))
444 status
= htonl(NFS4ERR_BADSESSION
);
448 status
= validate_seqid(&clp
->cl_session
->bc_slot_table
, args
);
449 spin_unlock(&tbl
->slot_tbl_lock
);
453 cps
->slotid
= args
->csa_slotid
;
456 * Check for pending referring calls. If a match is found, a
457 * related callback was received before the response to the original
460 if (referring_call_exists(clp
, args
->csa_nrclists
, args
->csa_rclists
)) {
461 status
= htonl(NFS4ERR_DELAY
);
465 memcpy(&res
->csr_sessionid
, &args
->csa_sessionid
,
466 sizeof(res
->csr_sessionid
));
467 res
->csr_sequenceid
= args
->csa_sequenceid
;
468 res
->csr_slotid
= args
->csa_slotid
;
469 res
->csr_highestslotid
= NFS41_BC_MAX_CALLBACKS
- 1;
470 res
->csr_target_highestslotid
= NFS41_BC_MAX_CALLBACKS
- 1;
473 cps
->clp
= clp
; /* put in nfs4_callback_compound */
474 for (i
= 0; i
< args
->csa_nrclists
; i
++)
475 kfree(args
->csa_rclists
[i
].rcl_refcalls
);
476 kfree(args
->csa_rclists
);
478 if (status
== htonl(NFS4ERR_RETRY_UNCACHED_REP
)) {
479 cps
->drc_status
= status
;
482 res
->csr_status
= status
;
484 dprintk("%s: exit with status = %d res->csr_status %d\n", __func__
,
485 ntohl(status
), ntohl(res
->csr_status
));
490 validate_bitmap_values(unsigned long mask
)
492 return (mask
& ~RCA4_TYPE_MASK_ALL
) == 0;
495 __be32
nfs4_callback_recallany(struct cb_recallanyargs
*args
, void *dummy
,
496 struct cb_process_state
*cps
)
501 status
= cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION
);
502 if (!cps
->clp
) /* set in cb_sequence */
505 dprintk("NFS: RECALL_ANY callback request from %s\n",
506 rpc_peeraddr2str(cps
->clp
->cl_rpcclient
, RPC_DISPLAY_ADDR
));
508 status
= cpu_to_be32(NFS4ERR_INVAL
);
509 if (!validate_bitmap_values(args
->craa_type_mask
))
512 status
= cpu_to_be32(NFS4_OK
);
513 if (test_bit(RCA4_TYPE_MASK_RDATA_DLG
, (const unsigned long *)
514 &args
->craa_type_mask
))
516 if (test_bit(RCA4_TYPE_MASK_WDATA_DLG
, (const unsigned long *)
517 &args
->craa_type_mask
))
518 flags
|= FMODE_WRITE
;
519 if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT
, (const unsigned long *)
520 &args
->craa_type_mask
))
521 pnfs_recall_all_layouts(cps
->clp
);
523 nfs_expire_all_delegation_types(cps
->clp
, flags
);
525 dprintk("%s: exit with status = %d\n", __func__
, ntohl(status
));
529 /* Reduce the fore channel's max_slots to the target value */
530 __be32
nfs4_callback_recallslot(struct cb_recallslotargs
*args
, void *dummy
,
531 struct cb_process_state
*cps
)
533 struct nfs4_slot_table
*fc_tbl
;
536 status
= htonl(NFS4ERR_OP_NOT_IN_SESSION
);
537 if (!cps
->clp
) /* set in cb_sequence */
540 dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
541 rpc_peeraddr2str(cps
->clp
->cl_rpcclient
, RPC_DISPLAY_ADDR
),
542 args
->crsa_target_max_slots
);
544 fc_tbl
= &cps
->clp
->cl_session
->fc_slot_table
;
546 status
= htonl(NFS4ERR_BAD_HIGH_SLOT
);
547 if (args
->crsa_target_max_slots
> fc_tbl
->max_slots
||
548 args
->crsa_target_max_slots
< 1)
551 status
= htonl(NFS4_OK
);
552 if (args
->crsa_target_max_slots
== fc_tbl
->max_slots
)
555 fc_tbl
->target_max_slots
= args
->crsa_target_max_slots
;
556 nfs41_handle_recall_slot(cps
->clp
);
558 dprintk("%s: exit with status = %d\n", __func__
, ntohl(status
));
561 #endif /* CONFIG_NFS_V4_1 */