2 * linux/fs/nfs/blocklayout/blocklayout.c
4 * Module for the NFSv4.1 pNFS block layout driver.
6 * Copyright (c) 2006 The Regents of the University of Michigan.
9 * Andy Adamson <andros@citi.umich.edu>
10 * Fred Isaman <iisaman@umich.edu>
12 * permission is granted to use, copy, create derivative works and
13 * redistribute this software and such derivative works for any purpose,
14 * so long as the name of the university of michigan is not used in
15 * any advertising or publicity pertaining to the use or distribution
16 * of this software without specific, written prior authorization. if
17 * the above copyright notice or any other identification of the
18 * university of michigan is included in any copy of any portion of
19 * this software, then the disclaimer below must also be included.
21 * this software is provided as is, without representation from the
22 * university of michigan as to its fitness for any purpose, and without
23 * warranty by the university of michigan of any kind, either express
24 * or implied, including without limitation the implied warranties of
25 * merchantability and fitness for a particular purpose. the regents
26 * of the university of michigan shall not be liable for any damages,
27 * including special, indirect, incidental, or consequential damages,
28 * with respect to any claim arising out or in connection with the use
29 * of the software, even if it has been or is hereafter advised of the
30 * possibility of such damages.
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/mount.h>
36 #include <linux/namei.h>
37 #include <linux/bio.h> /* struct bio */
38 #include <linux/prefetch.h>
39 #include <linux/pagevec.h>
42 #include "../nfs4session.h"
43 #include "../internal.h"
44 #include "blocklayout.h"
46 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
48 MODULE_LICENSE("GPL");
49 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
50 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
52 static bool is_hole(struct pnfs_block_extent
*be
)
54 switch (be
->be_state
) {
55 case PNFS_BLOCK_NONE_DATA
:
57 case PNFS_BLOCK_INVALID_DATA
:
58 return be
->be_tag
? false : true;
64 /* The data we are handed might be spread across several bios. We need
65 * to track when the last one is finished.
69 void (*pnfs_callback
) (void *data
);
73 static inline struct parallel_io
*alloc_parallel(void *data
)
75 struct parallel_io
*rv
;
77 rv
= kmalloc(sizeof(*rv
), GFP_NOFS
);
80 kref_init(&rv
->refcnt
);
85 static inline void get_parallel(struct parallel_io
*p
)
90 static void destroy_parallel(struct kref
*kref
)
92 struct parallel_io
*p
= container_of(kref
, struct parallel_io
, refcnt
);
94 dprintk("%s enter\n", __func__
);
95 p
->pnfs_callback(p
->data
);
99 static inline void put_parallel(struct parallel_io
*p
)
101 kref_put(&p
->refcnt
, destroy_parallel
);
105 bl_submit_bio(int rw
, struct bio
*bio
)
108 get_parallel(bio
->bi_private
);
109 dprintk("%s submitting %s bio %u@%llu\n", __func__
,
110 rw
== READ
? "read" : "write", bio
->bi_iter
.bi_size
,
111 (unsigned long long)bio
->bi_iter
.bi_sector
);
117 static struct bio
*bl_alloc_init_bio(int npg
, sector_t isect
,
118 struct pnfs_block_extent
*be
,
119 void (*end_io
)(struct bio
*, int err
),
120 struct parallel_io
*par
)
122 struct pnfs_block_dev
*dev
=
123 container_of(be
->be_device
, struct pnfs_block_dev
, d_node
);
126 npg
= min(npg
, BIO_MAX_PAGES
);
127 bio
= bio_alloc(GFP_NOIO
, npg
);
128 if (!bio
&& (current
->flags
& PF_MEMALLOC
)) {
129 while (!bio
&& (npg
/= 2))
130 bio
= bio_alloc(GFP_NOIO
, npg
);
134 bio
->bi_iter
.bi_sector
= isect
- be
->be_f_offset
+
136 bio
->bi_bdev
= dev
->d_bdev
;
137 bio
->bi_end_io
= end_io
;
138 bio
->bi_private
= par
;
143 static struct bio
*do_add_page_to_bio(struct bio
*bio
, int npg
, int rw
,
144 sector_t isect
, struct page
*page
,
145 struct pnfs_block_extent
*be
,
146 void (*end_io
)(struct bio
*, int err
),
147 struct parallel_io
*par
,
148 unsigned int offset
, int len
)
150 isect
= isect
+ (offset
>> SECTOR_SHIFT
);
151 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__
,
152 npg
, rw
, (unsigned long long)isect
, offset
, len
);
155 bio
= bl_alloc_init_bio(npg
, isect
, be
, end_io
, par
);
157 return ERR_PTR(-ENOMEM
);
159 if (bio_add_page(bio
, page
, len
, offset
) < len
) {
160 bio
= bl_submit_bio(rw
, bio
);
166 static void bl_end_io_read(struct bio
*bio
, int err
)
168 struct parallel_io
*par
= bio
->bi_private
;
171 struct nfs_pgio_header
*header
= par
->data
;
173 if (!header
->pnfs_error
)
174 header
->pnfs_error
= -EIO
;
175 pnfs_set_lo_fail(header
->lseg
);
182 static void bl_read_cleanup(struct work_struct
*work
)
184 struct rpc_task
*task
;
185 struct nfs_pgio_header
*hdr
;
186 dprintk("%s enter\n", __func__
);
187 task
= container_of(work
, struct rpc_task
, u
.tk_work
);
188 hdr
= container_of(task
, struct nfs_pgio_header
, task
);
189 pnfs_ld_read_done(hdr
);
193 bl_end_par_io_read(void *data
)
195 struct nfs_pgio_header
*hdr
= data
;
197 hdr
->task
.tk_status
= hdr
->pnfs_error
;
198 INIT_WORK(&hdr
->task
.u
.tk_work
, bl_read_cleanup
);
199 schedule_work(&hdr
->task
.u
.tk_work
);
202 static enum pnfs_try_status
203 bl_read_pagelist(struct nfs_pgio_header
*header
)
205 struct pnfs_block_layout
*bl
= BLK_LSEG2EXT(header
->lseg
);
206 struct bio
*bio
= NULL
;
207 struct pnfs_block_extent be
;
208 sector_t isect
, extent_length
= 0;
209 struct parallel_io
*par
;
210 loff_t f_offset
= header
->args
.offset
;
211 size_t bytes_left
= header
->args
.count
;
212 unsigned int pg_offset
, pg_len
;
213 struct page
**pages
= header
->args
.pages
;
214 int pg_index
= header
->args
.pgbase
>> PAGE_CACHE_SHIFT
;
215 const bool is_dio
= (header
->dreq
!= NULL
);
216 struct blk_plug plug
;
219 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__
,
220 header
->page_array
.npages
, f_offset
,
221 (unsigned int)header
->args
.count
);
223 par
= alloc_parallel(header
);
225 return PNFS_NOT_ATTEMPTED
;
226 par
->pnfs_callback
= bl_end_par_io_read
;
228 blk_start_plug(&plug
);
230 isect
= (sector_t
) (f_offset
>> SECTOR_SHIFT
);
231 /* Code assumes extents are page-aligned */
232 for (i
= pg_index
; i
< header
->page_array
.npages
; i
++) {
233 if (extent_length
<= 0) {
234 /* We've used up the previous extent */
235 bio
= bl_submit_bio(READ
, bio
);
237 /* Get the next one */
238 if (!ext_tree_lookup(bl
, isect
, &be
, false)) {
239 header
->pnfs_error
= -EIO
;
242 extent_length
= be
.be_length
- (isect
- be
.be_f_offset
);
245 pg_offset
= f_offset
& ~PAGE_CACHE_MASK
;
247 if (pg_offset
+ bytes_left
> PAGE_CACHE_SIZE
)
248 pg_len
= PAGE_CACHE_SIZE
- pg_offset
;
253 bytes_left
-= pg_len
;
254 isect
+= (pg_offset
>> SECTOR_SHIFT
);
255 extent_length
-= (pg_offset
>> SECTOR_SHIFT
);
257 BUG_ON(pg_offset
!= 0);
258 pg_len
= PAGE_CACHE_SIZE
;
262 bio
= bl_submit_bio(READ
, bio
);
263 /* Fill hole w/ zeroes w/o accessing device */
264 dprintk("%s Zeroing page for hole\n", __func__
);
265 zero_user_segment(pages
[i
], pg_offset
, pg_len
);
267 bio
= do_add_page_to_bio(bio
,
268 header
->page_array
.npages
- i
,
270 isect
, pages
[i
], &be
,
274 header
->pnfs_error
= PTR_ERR(bio
);
279 isect
+= (pg_len
>> SECTOR_SHIFT
);
280 extent_length
-= (pg_len
>> SECTOR_SHIFT
);
282 if ((isect
<< SECTOR_SHIFT
) >= header
->inode
->i_size
) {
284 header
->res
.count
= header
->inode
->i_size
- header
->args
.offset
;
286 header
->res
.count
= (isect
<< SECTOR_SHIFT
) - header
->args
.offset
;
289 bl_submit_bio(READ
, bio
);
290 blk_finish_plug(&plug
);
292 return PNFS_ATTEMPTED
;
295 static void bl_end_io_write(struct bio
*bio
, int err
)
297 struct parallel_io
*par
= bio
->bi_private
;
298 const int uptodate
= test_bit(BIO_UPTODATE
, &bio
->bi_flags
);
299 struct nfs_pgio_header
*header
= par
->data
;
302 if (!header
->pnfs_error
)
303 header
->pnfs_error
= -EIO
;
304 pnfs_set_lo_fail(header
->lseg
);
310 /* Function scheduled for call during bl_end_par_io_write,
311 * it marks sectors as written and extends the commitlist.
313 static void bl_write_cleanup(struct work_struct
*work
)
315 struct rpc_task
*task
= container_of(work
, struct rpc_task
, u
.tk_work
);
316 struct nfs_pgio_header
*hdr
=
317 container_of(task
, struct nfs_pgio_header
, task
);
319 dprintk("%s enter\n", __func__
);
321 if (likely(!hdr
->pnfs_error
)) {
322 struct pnfs_block_layout
*bl
= BLK_LSEG2EXT(hdr
->lseg
);
323 u64 start
= hdr
->args
.offset
& (loff_t
)PAGE_CACHE_MASK
;
324 u64 end
= (hdr
->args
.offset
+ hdr
->args
.count
+
325 PAGE_CACHE_SIZE
- 1) & (loff_t
)PAGE_CACHE_MASK
;
327 ext_tree_mark_written(bl
, start
>> SECTOR_SHIFT
,
328 (end
- start
) >> SECTOR_SHIFT
);
331 pnfs_ld_write_done(hdr
);
334 /* Called when last of bios associated with a bl_write_pagelist call finishes */
335 static void bl_end_par_io_write(void *data
)
337 struct nfs_pgio_header
*hdr
= data
;
339 hdr
->task
.tk_status
= hdr
->pnfs_error
;
340 hdr
->verf
.committed
= NFS_FILE_SYNC
;
341 INIT_WORK(&hdr
->task
.u
.tk_work
, bl_write_cleanup
);
342 schedule_work(&hdr
->task
.u
.tk_work
);
345 static enum pnfs_try_status
346 bl_write_pagelist(struct nfs_pgio_header
*header
, int sync
)
348 struct pnfs_block_layout
*bl
= BLK_LSEG2EXT(header
->lseg
);
349 struct bio
*bio
= NULL
;
350 struct pnfs_block_extent be
;
351 sector_t isect
, extent_length
= 0;
352 struct parallel_io
*par
= NULL
;
353 loff_t offset
= header
->args
.offset
;
354 size_t count
= header
->args
.count
;
355 struct page
**pages
= header
->args
.pages
;
356 int pg_index
= pg_index
= header
->args
.pgbase
>> PAGE_CACHE_SHIFT
;
357 struct blk_plug plug
;
360 dprintk("%s enter, %Zu@%lld\n", __func__
, count
, offset
);
362 /* At this point, header->page_aray is a (sequential) list of nfs_pages.
363 * We want to write each, and if there is an error set pnfs_error
364 * to have it redone using nfs.
366 par
= alloc_parallel(header
);
368 return PNFS_NOT_ATTEMPTED
;
369 par
->pnfs_callback
= bl_end_par_io_write
;
371 blk_start_plug(&plug
);
373 /* we always write out the whole page */
374 offset
= offset
& (loff_t
)PAGE_CACHE_MASK
;
375 isect
= offset
>> SECTOR_SHIFT
;
377 for (i
= pg_index
; i
< header
->page_array
.npages
; i
++) {
378 if (extent_length
<= 0) {
379 /* We've used up the previous extent */
380 bio
= bl_submit_bio(WRITE
, bio
);
381 /* Get the next one */
382 if (!ext_tree_lookup(bl
, isect
, &be
, true)) {
383 header
->pnfs_error
= -EINVAL
;
387 extent_length
= be
.be_length
- (isect
- be
.be_f_offset
);
390 bio
= do_add_page_to_bio(bio
, header
->page_array
.npages
- i
,
391 WRITE
, isect
, pages
[i
], &be
,
392 bl_end_io_write
, par
,
395 header
->pnfs_error
= PTR_ERR(bio
);
399 offset
+= PAGE_CACHE_SIZE
;
400 count
-= PAGE_CACHE_SIZE
;
401 isect
+= PAGE_CACHE_SECTORS
;
402 extent_length
-= PAGE_CACHE_SECTORS
;
405 header
->res
.count
= header
->args
.count
;
407 bl_submit_bio(WRITE
, bio
);
408 blk_finish_plug(&plug
);
410 return PNFS_ATTEMPTED
;
413 static void bl_free_layout_hdr(struct pnfs_layout_hdr
*lo
)
415 struct pnfs_block_layout
*bl
= BLK_LO2EXT(lo
);
418 dprintk("%s enter\n", __func__
);
420 err
= ext_tree_remove(bl
, true, 0, LLONG_MAX
);
426 static struct pnfs_layout_hdr
*bl_alloc_layout_hdr(struct inode
*inode
,
429 struct pnfs_block_layout
*bl
;
431 dprintk("%s enter\n", __func__
);
432 bl
= kzalloc(sizeof(*bl
), gfp_flags
);
436 bl
->bl_ext_rw
= RB_ROOT
;
437 bl
->bl_ext_ro
= RB_ROOT
;
438 spin_lock_init(&bl
->bl_ext_lock
);
440 return &bl
->bl_layout
;
/* Free a layout segment; all extent state lives in the layout header. */
static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}
449 /* We pretty much ignore lseg, and store all data layout wide, so we
450 * can correctly merge.
452 static struct pnfs_layout_segment
*bl_alloc_lseg(struct pnfs_layout_hdr
*lo
,
453 struct nfs4_layoutget_res
*lgr
,
456 struct pnfs_layout_segment
*lseg
;
459 dprintk("%s enter\n", __func__
);
460 lseg
= kzalloc(sizeof(*lseg
), gfp_flags
);
462 return ERR_PTR(-ENOMEM
);
463 status
= nfs4_blk_process_layoutget(lo
, lgr
, gfp_flags
);
465 /* We don't want to call the full-blown bl_free_lseg,
466 * since on error extents were not touched.
469 return ERR_PTR(status
);
475 bl_return_range(struct pnfs_layout_hdr
*lo
,
476 struct pnfs_layout_range
*range
)
478 struct pnfs_block_layout
*bl
= BLK_LO2EXT(lo
);
479 sector_t offset
= range
->offset
>> SECTOR_SHIFT
, end
;
482 if (range
->offset
% 8) {
483 dprintk("%s: offset %lld not block size aligned\n",
484 __func__
, range
->offset
);
488 if (range
->length
!= NFS4_MAX_UINT64
) {
489 if (range
->length
% 8) {
490 dprintk("%s: length %lld not block size aligned\n",
491 __func__
, range
->length
);
495 end
= offset
+ (range
->length
>> SECTOR_SHIFT
);
497 end
= round_down(NFS4_MAX_UINT64
, PAGE_SIZE
);
500 err
= ext_tree_remove(bl
, range
->iomode
& IOMODE_RW
, offset
, end
);
/* XDR-encode the list of written extents for LAYOUTCOMMIT. */
static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	ext_tree_encode_commit(BLK_LO2EXT(lo), xdr);
}
512 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data
*lcdata
)
514 struct pnfs_layout_hdr
*lo
= NFS_I(lcdata
->args
.inode
)->layout
;
516 dprintk("%s enter\n", __func__
);
517 ext_tree_mark_committed(BLK_LO2EXT(lo
), lcdata
->res
.status
);
521 bl_set_layoutdriver(struct nfs_server
*server
, const struct nfs_fh
*fh
)
523 dprintk("%s enter\n", __func__
);
525 if (server
->pnfs_blksize
== 0) {
526 dprintk("%s Server did not return blksize\n", __func__
);
529 if (server
->pnfs_blksize
> PAGE_SIZE
) {
530 printk(KERN_ERR
"%s: pNFS blksize %d not supported.\n",
531 __func__
, server
->pnfs_blksize
);
535 return nfs4_deviceid_getdevicelist(server
, fh
);
539 is_aligned_req(struct nfs_pageio_descriptor
*pgio
,
540 struct nfs_page
*req
, unsigned int alignment
)
543 * Always accept buffered writes, higher layers take care of the
546 if (pgio
->pg_dreq
== NULL
)
549 if (!IS_ALIGNED(req
->wb_offset
, alignment
))
552 if (IS_ALIGNED(req
->wb_bytes
, alignment
))
555 if (req_offset(req
) + req
->wb_bytes
== i_size_read(pgio
->pg_inode
)) {
557 * If the write goes up to the inode size, just write
558 * the full page. Data past the inode size is
559 * guaranteed to be zeroed by the higher level client
560 * code, and this behaviour is mandated by RFC 5663
570 bl_pg_init_read(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*req
)
572 if (!is_aligned_req(pgio
, req
, SECTOR_SIZE
)) {
573 nfs_pageio_reset_read_mds(pgio
);
577 pnfs_generic_pg_init_read(pgio
, req
);
581 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
582 * of bytes (maximum @req->wb_bytes) that can be coalesced.
585 bl_pg_test_read(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*prev
,
586 struct nfs_page
*req
)
588 if (!is_aligned_req(pgio
, req
, SECTOR_SIZE
))
590 return pnfs_generic_pg_test(pgio
, prev
, req
);
594 * Return the number of contiguous bytes for a given inode
595 * starting at page frame idx.
597 static u64
pnfs_num_cont_bytes(struct inode
*inode
, pgoff_t idx
)
599 struct address_space
*mapping
= inode
->i_mapping
;
602 /* Optimize common case that writes from 0 to end of file */
603 end
= DIV_ROUND_UP(i_size_read(inode
), PAGE_CACHE_SIZE
);
604 if (end
!= NFS_I(inode
)->npages
) {
606 end
= page_cache_next_hole(mapping
, idx
+ 1, ULONG_MAX
);
611 return i_size_read(inode
) - (idx
<< PAGE_CACHE_SHIFT
);
613 return (end
- idx
) << PAGE_CACHE_SHIFT
;
617 bl_pg_init_write(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*req
)
621 if (!is_aligned_req(pgio
, req
, PAGE_SIZE
)) {
622 nfs_pageio_reset_write_mds(pgio
);
626 if (pgio
->pg_dreq
== NULL
)
627 wb_size
= pnfs_num_cont_bytes(pgio
->pg_inode
,
630 wb_size
= nfs_dreq_bytes_left(pgio
->pg_dreq
);
632 pnfs_generic_pg_init_write(pgio
, req
, wb_size
);
636 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
637 * of bytes (maximum @req->wb_bytes) that can be coalesced.
640 bl_pg_test_write(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*prev
,
641 struct nfs_page
*req
)
643 if (!is_aligned_req(pgio
, req
, PAGE_SIZE
))
645 return pnfs_generic_pg_test(pgio
, prev
, req
);
648 static const struct nfs_pageio_ops bl_pg_read_ops
= {
649 .pg_init
= bl_pg_init_read
,
650 .pg_test
= bl_pg_test_read
,
651 .pg_doio
= pnfs_generic_pg_readpages
,
654 static const struct nfs_pageio_ops bl_pg_write_ops
= {
655 .pg_init
= bl_pg_init_write
,
656 .pg_test
= bl_pg_test_write
,
657 .pg_doio
= pnfs_generic_pg_writepages
,
660 static struct pnfs_layoutdriver_type blocklayout_type
= {
661 .id
= LAYOUT_BLOCK_VOLUME
,
662 .name
= "LAYOUT_BLOCK_VOLUME",
663 .owner
= THIS_MODULE
,
664 .flags
= PNFS_LAYOUTRET_ON_SETATTR
|
665 PNFS_READ_WHOLE_PAGE
,
666 .read_pagelist
= bl_read_pagelist
,
667 .write_pagelist
= bl_write_pagelist
,
668 .alloc_layout_hdr
= bl_alloc_layout_hdr
,
669 .free_layout_hdr
= bl_free_layout_hdr
,
670 .alloc_lseg
= bl_alloc_lseg
,
671 .free_lseg
= bl_free_lseg
,
672 .return_range
= bl_return_range
,
673 .encode_layoutcommit
= bl_encode_layoutcommit
,
674 .cleanup_layoutcommit
= bl_cleanup_layoutcommit
,
675 .set_layoutdriver
= bl_set_layoutdriver
,
676 .alloc_deviceid_node
= bl_alloc_deviceid_node
,
677 .free_deviceid_node
= bl_free_deviceid_node
,
678 .pg_read_ops
= &bl_pg_read_ops
,
679 .pg_write_ops
= &bl_pg_write_ops
,
682 static const struct rpc_pipe_ops bl_upcall_ops
= {
683 .upcall
= rpc_pipe_generic_upcall
,
684 .downcall
= bl_pipe_downcall
,
685 .destroy_msg
= bl_pipe_destroy_msg
,
688 static struct dentry
*nfs4blocklayout_register_sb(struct super_block
*sb
,
689 struct rpc_pipe
*pipe
)
691 struct dentry
*dir
, *dentry
;
693 dir
= rpc_d_lookup_sb(sb
, NFS_PIPE_DIRNAME
);
695 return ERR_PTR(-ENOENT
);
696 dentry
= rpc_mkpipe_dentry(dir
, "blocklayout", NULL
, pipe
);
701 static void nfs4blocklayout_unregister_sb(struct super_block
*sb
,
702 struct rpc_pipe
*pipe
)
705 rpc_unlink(pipe
->dentry
);
708 static int rpc_pipefs_event(struct notifier_block
*nb
, unsigned long event
,
711 struct super_block
*sb
= ptr
;
712 struct net
*net
= sb
->s_fs_info
;
713 struct nfs_net
*nn
= net_generic(net
, nfs_net_id
);
714 struct dentry
*dentry
;
717 if (!try_module_get(THIS_MODULE
))
720 if (nn
->bl_device_pipe
== NULL
) {
721 module_put(THIS_MODULE
);
726 case RPC_PIPEFS_MOUNT
:
727 dentry
= nfs4blocklayout_register_sb(sb
, nn
->bl_device_pipe
);
728 if (IS_ERR(dentry
)) {
729 ret
= PTR_ERR(dentry
);
732 nn
->bl_device_pipe
->dentry
= dentry
;
734 case RPC_PIPEFS_UMOUNT
:
735 if (nn
->bl_device_pipe
->dentry
)
736 nfs4blocklayout_unregister_sb(sb
, nn
->bl_device_pipe
);
742 module_put(THIS_MODULE
);
746 static struct notifier_block nfs4blocklayout_block
= {
747 .notifier_call
= rpc_pipefs_event
,
750 static struct dentry
*nfs4blocklayout_register_net(struct net
*net
,
751 struct rpc_pipe
*pipe
)
753 struct super_block
*pipefs_sb
;
754 struct dentry
*dentry
;
756 pipefs_sb
= rpc_get_sb_net(net
);
759 dentry
= nfs4blocklayout_register_sb(pipefs_sb
, pipe
);
/* Remove the blocklayout pipe from the net's rpc_pipefs, if mounted. */
static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}
776 static int nfs4blocklayout_net_init(struct net
*net
)
778 struct nfs_net
*nn
= net_generic(net
, nfs_net_id
);
779 struct dentry
*dentry
;
781 init_waitqueue_head(&nn
->bl_wq
);
782 nn
->bl_device_pipe
= rpc_mkpipe_data(&bl_upcall_ops
, 0);
783 if (IS_ERR(nn
->bl_device_pipe
))
784 return PTR_ERR(nn
->bl_device_pipe
);
785 dentry
= nfs4blocklayout_register_net(net
, nn
->bl_device_pipe
);
786 if (IS_ERR(dentry
)) {
787 rpc_destroy_pipe_data(nn
->bl_device_pipe
);
788 return PTR_ERR(dentry
);
790 nn
->bl_device_pipe
->dentry
= dentry
;
794 static void nfs4blocklayout_net_exit(struct net
*net
)
796 struct nfs_net
*nn
= net_generic(net
, nfs_net_id
);
798 nfs4blocklayout_unregister_net(net
, nn
->bl_device_pipe
);
799 rpc_destroy_pipe_data(nn
->bl_device_pipe
);
800 nn
->bl_device_pipe
= NULL
;
803 static struct pernet_operations nfs4blocklayout_net_ops
= {
804 .init
= nfs4blocklayout_net_init
,
805 .exit
= nfs4blocklayout_net_exit
,
808 static int __init
nfs4blocklayout_init(void)
812 dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__
);
814 ret
= pnfs_register_layoutdriver(&blocklayout_type
);
818 ret
= rpc_pipefs_notifier_register(&nfs4blocklayout_block
);
821 ret
= register_pernet_subsys(&nfs4blocklayout_net_ops
);
828 rpc_pipefs_notifier_unregister(&nfs4blocklayout_block
);
830 pnfs_unregister_layoutdriver(&blocklayout_type
);
834 static void __exit
nfs4blocklayout_exit(void)
836 dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
839 rpc_pipefs_notifier_unregister(&nfs4blocklayout_block
);
840 unregister_pernet_subsys(&nfs4blocklayout_net_ops
);
841 pnfs_unregister_layoutdriver(&blocklayout_type
);
844 MODULE_ALIAS("nfs-layouttype4-3");
846 module_init(nfs4blocklayout_init
);
847 module_exit(nfs4blocklayout_exit
);