/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */
#include <linux/nfs_fs.h>

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* protects pnfs_modules_tbl. */
static DEFINE_SPINLOCK(pnfs_spinlock);

/* pnfs_modules_tbl holds all pnfs modules */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)

	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
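
/* Like find_pnfs_driver_locked(), but takes pnfs_spinlock itself. */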
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
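
/*
 * Detach the current layout driver from the server: drop the module
 * reference taken in set_pnfs_layoutdriver() and clear pnfs_curr_ld.
 */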
unset_pnfs_layoutdriver(struct nfs_server *nfss)
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (!(server->nfs_client->cl_exchange_flags &
	    (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);

	ld_type = find_pnfs_driver(id);
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);

	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);

	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
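
/*
 * Called by a layout driver module at load time to add itself to
 * pnfs_modules_tbl.  Registration is refused for id 0, for drivers
 * that do not provide alloc_lseg/free_lseg, and for an id that is
 * already registered.
 */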
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
		       __func__, ld_type->id);
	spin_unlock(&pnfs_spinlock);

EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
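
/* Remove a layout driver from pnfs_modules_tbl, under pnfs_spinlock. */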
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);

EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
get_layout_hdr(struct pnfs_layout_hdr *lo)
	atomic_inc(&lo->plh_refcount);
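
/*
 * Final teardown of a layout header: detach it from the inode once the
 * last reference is gone (presumably followed by freeing the structure).
 */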
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
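
/* Drop a reference with the inode's i_lock already held. */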
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);

put_layout_hdr(struct pnfs_layout_hdr *lo)
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);

init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;

static void free_lseg(struct pnfs_layout_segment *lseg)
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);

put_lseg_common(struct pnfs_layout_segment *lseg)
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
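
/*
 * Drop a reference on a layout segment.  The final put (taken together
 * with i_lock) unhooks the lseg via put_lseg_common() and frees it
 * outside the lock through pnfs_free_lseg_list().
 */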
put_lseg(struct pnfs_layout_segment *lseg)
	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);

should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);

/* Returns count of number of matching invalid lsegs remaining in list */
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			removed += mark_lseg_invalid(lseg, tmp_list);
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;

/* note free_me must contain lsegs from a single layout_hdr */
pnfs_free_lseg_list(struct list_head *free_me)
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
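
/*
 * Invalidate all cached segments for one inode and permanently block
 * new LAYOUTGETs; the invalidated segments are freed outside i_lock.
 */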
pnfs_destroy_layout(struct nfs_inode *nfsi)
	struct pnfs_layout_hdr *lo;

	spin_lock(&nfsi->vfs_inode.i_lock);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
pnfs_destroy_all_layouts(struct nfs_client *clp)
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));

/* update lo->plh_stateid with new if is more recent */
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids. It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);

/* lget is set to 1 if called from inside send_layoutget call chain */
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
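
/*
 * Pick the stateid to send in LAYOUTGET: the open stateid while no
 * segments are held yet (re-read under the open_state seqlock), the
 * cached layout stateid otherwise.
 */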
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
	} else if (list_empty(&lo->plh_segs)) {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       u32 iomode)
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);

	/* Synchronously retrieve layout information from server */
	nfs4_proc_layoutget(lgp);
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
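
/*
 * Return-on-close: invalidate any lsegs marked NFS_LSEG_ROC, bump
 * plh_block_lgets to hold off new LAYOUTGETs, and take a header
 * reference that pnfs_roc_release() later drops.
 */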
bool pnfs_roc(struct inode *ino)
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	spin_unlock(&ino->i_lock);

void pnfs_roc_release(struct inode *ino)
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
cmp_layout(u32 iomode1, u32 iomode2)
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
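
/*
 * Insert an lseg into the layout header's segment list, sorted by
 * cmp_layout() so RW segments come before READ ones.
 */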
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);

	dprintk("%s:Return\n", __func__);

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
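
/*
 * Return the inode's cached layout header, allocating one (with i_lock
 * dropped around the allocation) if none exists yet.
 */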
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */

/*
 * iomode matching rules:
 */
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = get_lseg(lseg);
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, iomode);

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
	atomic_inc(&lo->plh_outstanding);

	if (list_empty(&lo->plh_segs))
	spin_unlock(&ino->i_lock);
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);

	lseg = send_layoutget(lo, ctx, iomode);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	atomic_dec(&lo->plh_outstanding);
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	spin_unlock(&ino->i_lock);
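
/*
 * Handle a LAYOUTGET reply: sanity-check the returned range, hand the
 * layout to the driver's alloc_lseg(), and insert the new lseg into the
 * cache unless a recall arrived meanwhile, in which case the reply is
 * forgotten and the lseg freed.
 */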
pnfs_layout_process(struct nfs4_layoutget *lgp)
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;

	/* Verify we got what we asked for.
	 * Note that because the xdr parsing only accepts a single
	 * element array, this can fail even if the server is behaving
	 * correctly.
	 */
	if (lgp->args.range.iomode > res->range.iomode ||
	    res->range.offset != 0 ||
	    res->range.length != NFS4_MAX_UINT64) {

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;

	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);

	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
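
/*
 * pg_test hook for the read path: on the first page of a coalescing
 * run, look up (or request) a READ layout segment, then defer to the
 * layout driver's own pg_test.
 */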
static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_READ);
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);

pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;

static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev,
			      struct nfs_page *req)
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_RW);
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);

pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
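
/*
 * Call the appropriate parallel I/O subsystem write function.
 */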
pnfs_try_to_write_data(struct nfs_write_data *wdata,
		       const struct rpc_call_ops *call_ops, int how)
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops)
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);