/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}
/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
        invalidate_bdev(bdev, 1);
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
int set_blocksize(struct block_device *bdev, int size)
{
        int oldsize;

        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_hardsect_size(bdev))
                return -EINVAL;

        oldsize = bdev->bd_block_size;
        if (oldsize == size)
                return 0;

        /* Ok, we're actually changing the blocksize.. */
        sync_blockdev(bdev);
        bdev->bd_block_size = size;
        bdev->bd_inode->i_blkbits = blksize_bits(size);
        kill_bdev(bdev);
        return 0;
}

EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
        int bits;

        if (set_blocksize(sb->s_bdev, size) < 0)
                return 0;
        sb->s_blocksize = size;
        for (bits = 9, size >>= 9; size >>= 1; bits++)
                ;
        sb->s_blocksize_bits = bits;
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_hardsect_size(sb->s_bdev);

        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(inode->i_bdev)) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = inode->i_bdev;
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh, int create)
{
        if ((iblock + max_blocks) > max_block(inode->i_bdev))
                return -EIO;

        bh->b_bdev = inode->i_bdev;
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        set_buffer_mapped(bh);
        return 0;
}
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_dentry->d_inode->i_mapping->host;

        return blockdev_direct_IO(rw, iocb, inode, inode->i_bdev, iov, offset,
                                nr_segs, blkdev_get_blocks, NULL);
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_commit_write(page, from, to);
}
/*
 * private llseek:
 * for a block special file file->f_dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode;
        loff_t size;
        loff_t retval;

        bd_inode = file->f_dentry->d_inode->i_bdev->bd_inode;
        down(&bd_inode->i_sem);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                }
                retval = offset;
        }
        up(&bd_inode->i_sem);
        return retval;
}
/*
 *	Filp may be NULL when we are called by an msync of a vma
 *	since the vma has no handle.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        struct inode * inode = dentry->d_inode;

        return sync_blockdev(inode->i_bdev);
}
static spinlock_t bdev_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
static kmem_cache_t * bdev_cachep;

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                        SLAB_CTOR_CONSTRUCTOR)
        {
                memset(bdev, 0, sizeof(*bdev));
                sema_init(&bdev->bd_sem, 1);
                INIT_LIST_HEAD(&bdev->bd_inodes);
                INIT_LIST_HEAD(&bdev->bd_list);
                inode_init_once(&ei->vfs_inode);
        }
}
static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}
static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;

        spin_lock(&bdev_lock);
        while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}
static struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};
static struct super_block *bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
}
static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};
static struct vfsmount *bd_mnt;
struct super_block *blockdev_superblock;
void __init bdev_cache_init(void)
{
        int err;

        bdev_cachep = kmem_cache_create("bdev_cache",
                        sizeof(struct bdev_inode),
                        0,
                        SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
                        init_once,
                        NULL);
        if (!bdev_cachep)
                panic("Cannot create bdev_cache SLAB cache");
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        err = PTR_ERR(bd_mnt);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}
/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}
static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);
struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);
long nr_blockdev_pages(void)
{
        struct list_head *p;
        long ret = 0;

        spin_lock(&bdev_lock);
        list_for_each(p, &all_bdevs) {
                struct block_device *bdev;
                bdev = list_entry(p, struct block_device, bd_list);
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}
void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
int bd_acquire(struct inode *inode)
{
        struct block_device *bdev;

        spin_lock(&bdev_lock);
        if (inode->i_bdev && igrab(inode->i_bdev->bd_inode)) {
                spin_unlock(&bdev_lock);
                return 0;
        }
        spin_unlock(&bdev_lock);
        bdev = bdget(inode->i_rdev);
        if (!bdev)
                return -ENOMEM;
        spin_lock(&bdev_lock);
        if (inode->i_bdev)
                __bd_forget(inode);
        inode->i_bdev = bdev;
        inode->i_mapping = bdev->bd_inode->i_mapping;
        list_add(&inode->i_devices, &bdev->bd_inodes);
        spin_unlock(&bdev_lock);
        return 0;
}
/* Call when you free inode */

void bd_forget(struct inode *inode)
{
        spin_lock(&bdev_lock);
        if (inode->i_bdev)
                __bd_forget(inode);
        spin_unlock(&bdev_lock);
}
int bd_claim(struct block_device *bdev, void *holder)
{
        int res;

        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders ++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);
void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);
/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode, int kind)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;

        if (bdev)
                err = blkdev_get(bdev, mode, flags, kind);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);
/*
 * This routine checks whether removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        struct block_device_operations * bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        if (__invalidate_device(bdev, 0))
                printk("VFS: busy inodes on changed media.\n");

        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        if (bdev->bd_disk->minors > 1)
                bdev->bd_invalidated = 1;
        return 1;
}

EXPORT_SYMBOL(check_disk_change);
static void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_hardsect_size(bdev);

        i_size_write(bdev->bd_inode, size);
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
static int do_open(struct block_device *bdev, struct inode *inode, struct file *file)
{
        struct module *owner = NULL;
        struct gendisk *disk;
        int ret = -ENXIO;
        int part;

        lock_kernel();
        disk = get_gendisk(bdev->bd_dev, &part);
        if (!disk) {
                unlock_kernel();
                bdput(bdev);
                return ret;
        }
        owner = disk->fops->owner;

        down(&bdev->bd_sem);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!part) {
                        struct backing_dev_info *bdi;
                        if (disk->fops->open) {
                                ret = disk->fops->open(inode, file);
                                if (ret)
                                        goto out_first;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct hd_struct *p;
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_first;
                        ret = blkdev_get(whole, file->f_mode, file->f_flags, BDEV_RAW);
                        if (ret)
                                goto out_first;
                        bdev->bd_contains = whole;
                        down(&whole->bd_sem);
                        whole->bd_part_count++;
                        p = disk->part[part - 1];
                        bdev->bd_inode->i_data.backing_dev_info =
                           whole->bd_inode->i_data.backing_dev_info;
                        if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
                                whole->bd_part_count--;
                                up(&whole->bd_sem);
                                ret = -ENXIO;
                                goto out_first;
                        }
                        kobject_get(&p->kobj);
                        bdev->bd_part = p;
                        bd_set_size(bdev, (loff_t) p->nr_sects << 9);
                        up(&whole->bd_sem);
                }
        } else {
                put_disk(disk);
                module_put(owner);
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(inode, file);
                                if (ret)
                                        goto out;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                } else {
                        down(&bdev->bd_contains->bd_sem);
                        bdev->bd_contains->bd_part_count++;
                        up(&bdev->bd_contains->bd_sem);
                }
        }
        bdev->bd_openers++;
        up(&bdev->bd_sem);
        unlock_kernel();
        return 0;

out_first:
        bdev->bd_disk = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                blkdev_put(bdev->bd_contains, BDEV_RAW);
        bdev->bd_contains = NULL;
        put_disk(disk);
        module_put(owner);
out:
        up(&bdev->bd_sem);
        unlock_kernel();
        if (ret)
                bdput(bdev);
        return ret;
}
int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, int kind)
{
        /*
         * This crockload is due to bad choice of ->open() type.
         * It will go away.
         * For now, block device ->open() routine must _not_
         * examine anything in 'inode' argument except ->i_rdev.
         */
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        fake_file.f_mode = mode;
        fake_file.f_flags = flags;
        fake_file.f_dentry = &fake_dentry;
        fake_dentry.d_inode = bdev->bd_inode;

        return do_open(bdev, bdev->bd_inode, &fake_file);
}

EXPORT_SYMBOL(blkdev_get);
int blkdev_open(struct inode * inode, struct file * filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        bd_acquire(inode);
        bdev = inode->i_bdev;

        res = do_open(bdev, inode, filp);
        if (res)
                return res;

        if (!(filp->f_flags & O_EXCL) )
                return 0;

        if (!(res = bd_claim(bdev, filp)))
                return 0;

        blkdev_put(bdev, BDEV_FILE);
        return res;
}

EXPORT_SYMBOL(blkdev_open);
int blkdev_put(struct block_device *bdev, int kind)
{
        int ret = 0;
        struct inode *bd_inode = bdev->bd_inode;
        struct gendisk *disk = bdev->bd_disk;

        down(&bdev->bd_sem);
        lock_kernel();
        if (!--bdev->bd_openers) {
                switch (kind) {
                case BDEV_FILE:
                case BDEV_FS:
                        sync_blockdev(bd_inode->i_bdev);
                        break;
                }
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(bd_inode, NULL);
        } else {
                down(&bdev->bd_contains->bd_sem);
                bdev->bd_contains->bd_part_count--;
                up(&bdev->bd_contains->bd_sem);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);

                if (bdev->bd_contains != bdev) {
                        kobject_put(&bdev->bd_part->kobj);
                        bdev->bd_part = NULL;
                }
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains) {
                        blkdev_put(bdev->bd_contains, BDEV_RAW);
                }
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        up(&bdev->bd_sem);
        bdput(bdev);
        return ret;
}

EXPORT_SYMBOL(blkdev_put);
int blkdev_close(struct inode * inode, struct file * filp)
{
        if (inode->i_bdev->bd_holder == filp)
                bd_release(inode->i_bdev);
        return blkdev_put(inode->i_bdev, BDEV_FILE);
}
static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
                                   size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}
struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .prepare_write  = blkdev_prepare_write,
        .commit_write   = blkdev_commit_write,
        .writepages     = generic_writepages,
        .direct_IO      = blkdev_direct_IO,
};
struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
        .read           = generic_file_read,
        .write          = blkdev_file_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = blkdev_file_aio_write,
        .mmap           = generic_file_mmap,
        .fsync          = block_fsync,
        .ioctl          = blkdev_ioctl,
        .readv          = generic_file_readv,
        .writev         = generic_file_writev,
        .sendfile       = generic_file_sendfile,
};

EXPORT_SYMBOL(def_blk_fops);
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);
/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:	special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
        struct block_device *bdev;
        struct inode *inode;
        struct nameidata nd;
        int error;

        if (!path || !*path)
                return ERR_PTR(-EINVAL);

        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error)
                return ERR_PTR(error);

        inode = nd.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (nd.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = bd_acquire(inode);
        if (error)
                goto fail;
        bdev = inode->i_bdev;

out:
        path_release(&nd);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}
/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:	special file representing the block device
 * @flags:	%MS_RDONLY for opening read-only
 * @kind:	usage (same as the 4th parameter to blkdev_get)
 * @holder:	owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder and properly set it up for @kind usage.
 */
struct block_device *open_bdev_excl(const char *path, int flags,
                                    int kind, void *holder)
{
        struct block_device *bdev;
        mode_t mode = FMODE_READ;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;
        error = blkdev_get(bdev, mode, 0, kind);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev, BDEV_FS);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);
/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:	blockdevice to close
 * @kind:	usage (same as the 4th parameter to blkdev_get)
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev, int kind)
{
        bd_release(bdev);
        blkdev_put(bdev, kind);
}

EXPORT_SYMBOL(close_bdev_excl);