/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *	protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
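/*
 * A minimal registration sketch (illustrative only, not part of this
 * file): a layout driver module would typically call
 * pnfs_register_layoutdriver() above and pnfs_unregister_layoutdriver()
 * below from its module init/exit hooks. All "example_*" names are
 * hypothetical.
 *
 *	static struct pnfs_layoutdriver_type example_layout_type = {
 *		.id		= LAYOUT4_NFSV4_1_FILES,
 *		.name		= "LAYOUT_EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_layout_type);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&example_layout_type);
 *	}
 */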
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
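/*
 * Worked example for the retry window above: with HZ == 1000, a fail
 * bit set at jiffies J keeps this iomode marked failed while jiffies
 * remains within [J, J + 120000] (PNFS_LAYOUTGET_RETRY_TIMEOUT). Once
 * the stored plh_retry_timestamp falls outside that window, the bit
 * is cleared and LAYOUTGET may be retried.
 */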
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
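/*
 * Example: end_offset() saturates rather than wrapping. For
 * start = 4096 and len = NFS4_MAX_UINT64 (the "whole file" length
 * used throughout this file), start + len wraps around to 4095,
 * which is below start, so NFS4_MAX_UINT64 is returned as the end
 * offset instead of a bogus small value.
 */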
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}
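/*
 * Example: a CB_LAYOUTRECALL carrying IOMODE_ANY and the full-file
 * range (offset 0, length NFS4_MAX_UINT64) matches every cached
 * lseg, whereas a recall with IOMODE_RW leaves IOMODE_READ segments
 * alone even when their byte ranges overlap the recalled range.
 */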
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)s1 - (s32)s2 > 0;
}
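/*
 * Example of the wraparound handling above: for s1 = 0x00000001 and
 * s2 = 0xffffffff, (s32)s1 - (s32)s2 == 1 - (-1) == 2 > 0, so seqid 1
 * is treated as newer than seqid 0xffffffff even though it is
 * numerically smaller.
 */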
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
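/*
 * Example of the lget parameter: while send_layoutget() has its own
 * LAYOUTGET outstanding on an empty layout (plh_outstanding == 1),
 * a caller passing lget == 0, such as pnfs_update_layout(), is
 * blocked, but the send_layoutget() call chain itself passes
 * lget == 1 and is not blocked by its own outstanding request.
 */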
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
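/*
 * Example of the resulting cache order for three segments:
 *	{ offset 0,     length 16384, IOMODE_RW   }	first
 *	{ offset 0,     length 16384, IOMODE_READ }
 *	{ offset 16384, length 8192,  IOMODE_READ }	last
 * i.e. ascending offset, then longer length first, then RW ahead of
 * READ, so lookups encounter RW segments before RO ones.
 */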
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
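/*
 * Worked example (hypothetical hint values): if an OPEN returned
 * rd_sz = 65536 with only THRESHOLD_RD set in t->bm, a READ of a
 * 4096-byte file sees fsize < rd_sz, so the function returns true
 * and the I/O goes to the MDS; for a 1 MB file it returns false and
 * the read is served through the pNFS data servers.
 */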
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts) ? true : false;
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode==IOMODE_RW ?  "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1) ||
	    pnfs_layout_stateid_blocked(lo, &res->stateid)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}
bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
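/*
 * Example of the boundary test above: with a cached lseg covering
 * offset 0, length 1048576, a request starting at offset 1044480
 * can still be coalesced (it begins inside the range), while one
 * starting at offset 1048576, the first byte outside the lseg,
 * fails the test and ends the current batch.
 */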
int pnfs_write_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed.
		 * Mark the overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_write_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_writedata_release(data);
}
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}
1629 void pnfs_ld_read_done(struct nfs_read_data
*data
)
1631 struct nfs_pgio_header
*hdr
= data
->header
;
1633 if (likely(!hdr
->pnfs_error
)) {
1634 __nfs4_read_done_cb(data
);
1635 hdr
->mds_ops
->rpc_call_done(&data
->task
, data
);
1637 pnfs_ld_handle_read_error(data
);
1638 hdr
->mds_ops
->rpc_release(data
);
1640 EXPORT_SYMBOL_GPL(pnfs_ld_read_done
);
1643 pnfs_read_through_mds(struct nfs_pageio_descriptor
*desc
,
1644 struct nfs_read_data
*data
)
1646 struct nfs_pgio_header
*hdr
= data
->header
;
1648 if (!test_and_set_bit(NFS_IOHDR_REDO
, &hdr
->flags
)) {
1649 list_splice_tail_init(&hdr
->pages
, &desc
->pg_list
);
1650 nfs_pageio_reset_read_mds(desc
);
1651 desc
->pg_recoalesce
= 1;
1653 nfs_readdata_release(data
);
1657 * Call the appropriate parallel I/O subsystem read function.
1659 static enum pnfs_try_status
1660 pnfs_try_to_read_data(struct nfs_read_data
*rdata
,
1661 const struct rpc_call_ops
*call_ops
,
1662 struct pnfs_layout_segment
*lseg
)
1664 struct nfs_pgio_header
*hdr
= rdata
->header
;
1665 struct inode
*inode
= hdr
->inode
;
1666 struct nfs_server
*nfss
= NFS_SERVER(inode
);
1667 enum pnfs_try_status trypnfs
;
1669 hdr
->mds_ops
= call_ops
;
1671 dprintk("%s: Reading ino:%lu %u@%llu\n",
1672 __func__
, inode
->i_ino
, rdata
->args
.count
, rdata
->args
.offset
);
1674 trypnfs
= nfss
->pnfs_curr_ld
->read_pagelist(rdata
);
1675 if (trypnfs
!= PNFS_NOT_ATTEMPTED
)
1676 nfs_inc_stats(inode
, NFSIOS_PNFS_READ
);
1677 dprintk("%s End (trypnfs:%d)\n", __func__
, trypnfs
);
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (thp == NULL)
		dprintk("%s mdsthreshold allocation failed\n", __func__);
	return thp;
}