/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All rights reserved.
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization.  If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
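/*
 * A minimal registration sketch (illustrative only; the mylayout_*
 * names are hypothetical, not part of this file).  A layout driver
 * module typically registers itself on load and unregisters on unload:
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id         = LAYOUT4_NFSV4_1_FILES,
 *		.name       = "mylayout",
 *		.owner      = THIS_MODULE,
 *		.alloc_lseg = mylayout_alloc_lseg,
 *		.free_lseg  = mylayout_free_lseg,
 *	};
 *
 *	static int __init mylayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&mylayout_type);
 *	}
 *
 *	static void __exit mylayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&mylayout_type);
 *	}
 *
 * alloc_lseg and free_lseg must both be provided, as enforced in
 * pnfs_register_layoutdriver() above.
 */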
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;

	/* A driver that supplies alloc_layout_hdr supplies free_layout_hdr too */
	if (ld->alloc_layout_hdr)
		ld->free_layout_hdr(lo);
	else
		kfree(lo);
}
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}
void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
/* last octet in a range */
static u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
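/*
 * Overflow in the additions above is deliberate: a length of
 * NFS4_MAX_UINT64 means "to end of file", so start + len wraps and
 * both helpers clamp the result to NFS4_MAX_UINT64.  For example,
 * end_offset(8192, NFS4_MAX_UINT64) wraps to 8191, which is < start,
 * so NFS4_MAX_UINT64 is returned rather than the wrapped value.
 */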
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
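/*
 * The (int)(newseq - oldseq) > 0 test above is serial-number style
 * comparison under unsigned wraparound: e.g. oldseq = 0xffffffff and
 * newseq = 0x00000002 give newseq - oldseq = 3, so the new stateid
 * still counts as more recent even though it wrapped past zero.
 */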
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
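/*
 * Note on the seqlock loop above: the open stateid is copied without
 * taking the state owner's lock; read_seqbegin()/read_seqretry()
 * simply repeat the memcpy if a writer updated open_state->stateid
 * mid-copy, so the caller never sees a torn stateid.
 */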
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}
/* Initiates a LAYOUTRETURN(FILE) */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0;

	dprintk("--> %s\n", __func__);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("%s: no layout to return\n", __func__);
		return status;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		/* list is sorted by cmp_layout, so we can stop early */
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
bool
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		return false;
	nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
	return true;
}
bool
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		return false;
	nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
	return true;
}
bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if an nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with
	 *   the previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range.  FIXME?
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_write_done(struct nfs_write_data *data)
{
	int status;

	if (!data->pnfs_error) {
		pnfs_set_layoutcommit(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
				    data->mds_ops, NFS_FILE_SYNC);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_write_data *data)
{
	list_splice_tail_init(&data->pages, &desc->pg_list);
	if (data->req && list_empty(&data->req->wb_list))
		nfs_list_add_request(data->req, &desc->pg_list);
	nfs_pageio_reset_write_mds(desc);
	desc->pg_recoalesce = 1;
	nfs_writedata_release(data);
}
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;
	wdata->lseg = get_lseg(lseg);

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_entry(head->next, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	put_lseg(lseg);
}
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret;

	ret = nfs_generic_flush(desc, &head);
	if (ret != 0) {
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
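/*
 * In the write path above, each nfs_write_data is first offered to the
 * layout driver; anything it declines (PNFS_NOT_ATTEMPTED) is requeued
 * through pnfs_write_through_mds() and recoalesced for transmission to
 * the metadata server.  The read path below mirrors this structure.
 */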
/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_read_done(struct nfs_read_data *data)
{
	int status;

	if (!data->pnfs_error) {
		__nfs4_read_done_cb(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
				   data->mds_ops);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_read_data *data)
{
	list_splice_tail_init(&data->pages, &desc->pg_list);
	if (data->req && list_empty(&data->req->wb_list))
		nfs_list_add_request(data->req, &desc->pg_list);
	nfs_pageio_reset_read_mds(desc);
	desc->pg_recoalesce = 1;
	nfs_readdata_release(data);
}
/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;
	rdata->lseg = get_lseg(lseg);

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_entry(head->next, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	put_lseg(lseg);
}
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret;

	ret = nfs_generic_pagein(desc, &head);
	if (ret != 0) {
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	pnfs_do_multiple_reads(desc, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
/*
 * Currently there is only one (whole file) write lseg.
 */
static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
{
	struct pnfs_layout_segment *lseg, *rv = NULL;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			rv = lseg;
	return rv;
}
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
		wdata->lseg->pls_lc_cred =
			get_rpccred(wdata->args.context->state->owner->so_cred);
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu\n",
			__func__, wdata->inode->i_ino);
	}
	if (end_pos > wdata->lseg->pls_end_pos)
		wdata->lseg->pls_end_pos = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg;
	struct rpc_cred *cred;
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}

	/*
	 * Currently only one (whole file) write lseg which is referenced
	 * in pnfs_set_layoutcommit and will be found.
	 */
	lseg = pnfs_list_write_lseg(inode);

	end_pos = lseg->pls_end_pos;
	cred = lseg->pls_lc_cred;
	lseg->pls_end_pos = 0;
	lseg->pls_lc_cred = NULL;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = cred;
	data->lseg = lseg;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}