/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
        invalidate_bdev(bdev, 1);
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
int set_blocksize(struct block_device *bdev, int size)
{
        int oldsize;

        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_hardsect_size(bdev))
                return -EINVAL;

        oldsize = bdev->bd_block_size;
        if (oldsize == size)
                return 0;

        /* Ok, we're actually changing the blocksize.. */
        sync_blockdev(bdev);
        bdev->bd_block_size = size;
        bdev->bd_inode->i_blkbits = blksize_bits(size);
        kill_bdev(bdev);
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        int bits;
        if (set_blocksize(sb->s_bdev, size) < 0)
                return 0;
        sb->s_blocksize = size;
        for (bits = 9, size >>= 9; size >>= 1; bits++)
                ;
        sb->s_blocksize_bits = bits;
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_hardsect_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
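
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * normally picks its block size from fill_super() via the helpers
 * above.  sb_min_blocksize() rounds the request up to the hardware
 * sector size and returns 0 if set_blocksize() rejects the result.
 * The function name below is hypothetical.
 */
static int example_pick_blocksize(struct super_block *sb)
{
        /* ask for 1k blocks, but never below the device's sector size */
        if (!sb_min_blocksize(sb, 1024))
                return -EINVAL;
        return 0;
}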
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
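
/*
 * Illustrative sketch, not part of the original file: a read-side
 * caller of the get_block interface tells a mapped block from the
 * hole returned above by testing the buffer state.  The helper name
 * is hypothetical.
 */
static int example_block_is_mapped(struct inode *inode, sector_t iblock)
{
        struct buffer_head bh = { .b_state = 0 };

        /* create == 0: probing for a read, so past-EOD comes back a hole */
        if (blkdev_get_block(inode, iblock, &bh, 0) < 0)
                return 0;
        return buffer_mapped(&bh);
}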
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh, int create)
{
        if ((iblock + max_blocks) > max_block(I_BDEV(inode)))
                return -EIO;

        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        set_buffer_mapped(bh);
        return 0;
}

static int
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
                                nr_segs, blkdev_get_blocks, NULL);
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_commit_write(page, from, to);
}
/*
 * private llseek:
 * for a block special file, file->f_dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode = file->f_mapping->host;
        loff_t size;
        loff_t retval;

        down(&bd_inode->i_sem);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos)
                        file->f_pos = offset;
                retval = offset;
        }
        up(&bd_inode->i_sem);
        return retval;
}

/*
 * Filp is never NULL; the only case when ->fsync() is called with
 * NULL first argument is nfsd_sync_dir() and that's not a directory.
 */
static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
}
/*
 * pseudo-fs
 */

static spinlock_t bdev_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
static kmem_cache_t * bdev_cachep;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
        {
                memset(bdev, 0, sizeof(*bdev));
                sema_init(&bdev->bd_sem, 1);
                INIT_LIST_HEAD(&bdev->bd_inodes);
                INIT_LIST_HEAD(&bdev->bd_list);
                inode_init_once(&ei->vfs_inode);
        }
}

static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;
        spin_lock(&bdev_lock);
        while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}

static struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};

static struct super_block *bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *bd_mnt;
struct super_block *blockdev_superblock;
void __init bdev_cache_init(void)
{
        int err;
        bdev_cachep = kmem_cache_create("bdev_cache",
                        sizeof(struct bdev_inode),
                        0,
                        SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
                        init_once,
                        NULL);
        if (!bdev_cachep)
                panic("Cannot create bdev_cache SLAB cache");
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        err = PTR_ERR(bd_mnt);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}
/*
 * Most likely a _very_ bad hash function - but then it's hardly critical
 * for small /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);
struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);

long nr_blockdev_pages(void)
{
        struct list_head *p;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each(p, &all_bdevs) {
                struct block_device *bdev;
                bdev = list_entry(p, struct block_device, bd_list);
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
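
/*
 * Illustrative sketch, not part of the original file: bdget() returns
 * the bdev with a reference on its inode, which must be balanced by
 * bdput().  The function name is hypothetical.
 */
static long example_cached_pages(dev_t dev)
{
        struct block_device *bdev = bdget(dev);
        long nrpages;

        if (!bdev)
                return -ENOMEM;
        nrpages = bdev->bd_inode->i_mapping->nrpages;
        bdput(bdev);
        return nrpages;
}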
static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;
        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev && igrab(bdev->bd_inode)) {
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);
        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (inode->i_bdev)
                        __bd_forget(inode);
                inode->i_bdev = bdev;
                inode->i_mapping = bdev->bd_inode->i_mapping;
                list_add(&inode->i_devices, &bdev->bd_inodes);
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        spin_lock(&bdev_lock);
        if (inode->i_bdev)
                __bd_forget(inode);
        spin_unlock(&bdev_lock);
}
int bd_claim(struct block_device *bdev, void *holder)
{
        int res;
        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);
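
/*
 * Illustrative sketch, not part of the original file: exclusive access
 * is bracketed by bd_claim()/bd_release(); the holder is just a stable
 * cookie used to detect re-claims by the same owner.  The function
 * name is hypothetical.
 */
static int example_exclusive_access(struct block_device *bdev, void *holder)
{
        int err = bd_claim(bdev, holder);

        if (err)
                return err;     /* -EBUSY: held by someone else */
        /* ... use the device exclusively ... */
        bd_release(bdev);
        return 0;
}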
/*
 * Tries to open a block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
        if (bdev)
                err = blkdev_get(bdev, mode, flags);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);
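
/*
 * Illustrative sketch, not part of the original file: the one
 * legitimate pattern for open_by_devnum() - an interface that hands
 * us nothing but a device number.  The function name is hypothetical.
 */
static int example_open_by_number(dev_t dev)
{
        struct block_device *bdev = open_by_devnum(dev, FMODE_READ);

        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        /* ... read from the device ... */
        blkdev_put(bdev);
        return 0;
}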
/*
 * This routine checks whether a removable medium has been changed,
 * and invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        struct block_device_operations *bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        if (__invalidate_device(bdev, 0))
                printk("VFS: busy inodes on changed media.\n");

        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        if (bdev->bd_disk->minors > 1)
                bdev->bd_invalidated = 1;
        return 1;
}

EXPORT_SYMBOL(check_disk_change);
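
/*
 * Illustrative sketch, not part of the original file: a removable-media
 * driver typically calls check_disk_change() from its ->open() so that
 * stale cached pages are dropped before the new medium is touched.
 * The function name is hypothetical.
 */
static int example_removable_open(struct inode *inode, struct file *filp)
{
        /* returns 1 (after invalidating caches) if the medium changed */
        check_disk_change(inode->i_bdev);
        return 0;
}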
void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_hardsect_size(bdev);

        bdev->bd_inode->i_size = size;
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
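
/*
 * Worked example, not part of the original file: with 512-byte hardware
 * sectors and a device of 1,049,600 bytes (1025 * 1024), bits 0-9 of the
 * size are clear, so the loop above doubles bsize from 512 to 1024; bit
 * 10 of the size is set, so it stops there.  The soft block size becomes
 * 1024 - the largest power of two dividing the device size, capped at
 * PAGE_CACHE_SIZE.
 */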
static int do_open(struct block_device *bdev, struct file *file)
{
        struct module *owner = NULL;
        struct gendisk *disk;
        int ret = -ENXIO;
        int part;

        file->f_mapping = bdev->bd_inode->i_mapping;
        lock_kernel();
        disk = get_gendisk(bdev->bd_dev, &part);
        if (!disk) {
                unlock_kernel();
                bdput(bdev);
                return ret;
        }
        owner = disk->fops->owner;

        down(&bdev->bd_sem);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!part) {
                        struct backing_dev_info *bdi;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out_first;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct hd_struct *p;
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_first;
                        ret = blkdev_get(whole, file->f_mode, file->f_flags);
                        if (ret)
                                goto out_first;
                        bdev->bd_contains = whole;
                        down(&whole->bd_sem);
                        whole->bd_part_count++;
                        p = disk->part[part - 1];
                        bdev->bd_inode->i_data.backing_dev_info =
                                whole->bd_inode->i_data.backing_dev_info;
                        if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
                                whole->bd_part_count--;
                                up(&whole->bd_sem);
                                ret = -ENXIO;
                                goto out_first;
                        }
                        kobject_get(&p->kobj);
                        bdev->bd_part = p;
                        bd_set_size(bdev, (loff_t) p->nr_sects << 9);
                        up(&whole->bd_sem);
                }
        } else {
                put_disk(disk);
                module_put(owner);
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                } else {
                        down(&bdev->bd_contains->bd_sem);
                        bdev->bd_contains->bd_part_count++;
                        up(&bdev->bd_contains->bd_sem);
                }
        }
        bdev->bd_openers++;
        up(&bdev->bd_sem);
        unlock_kernel();
        return 0;

out_first:
        bdev->bd_disk = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                blkdev_put(bdev->bd_contains);
        bdev->bd_contains = NULL;
        put_disk(disk);
        module_put(owner);
out:
        up(&bdev->bd_sem);
        unlock_kernel();
        if (ret)
                bdput(bdev);
        return ret;
}
int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
        /*
         * This crockload is due to a bad choice of ->open() type.
         * It will go away.
         * For now, a block device ->open() routine must _not_
         * examine anything in the 'inode' argument except ->i_rdev.
         */
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        fake_file.f_mode = mode;
        fake_file.f_flags = flags;
        fake_file.f_dentry = &fake_dentry;
        fake_dentry.d_inode = bdev->bd_inode;

        return do_open(bdev, &fake_file);
}

EXPORT_SYMBOL(blkdev_get);
int blkdev_open(struct inode * inode, struct file * filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binaries need it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        bdev = bd_acquire(inode);

        res = do_open(bdev, filp);
        if (res)
                return res;

        if (!(filp->f_flags & O_EXCL))
                return 0;

        if (!(res = bd_claim(bdev, filp)))
                return 0;

        blkdev_put(bdev);
        return res;
}

EXPORT_SYMBOL(blkdev_open);
int blkdev_put(struct block_device *bdev)
{
        int ret = 0;
        struct inode *bd_inode = bdev->bd_inode;
        struct gendisk *disk = bdev->bd_disk;

        down(&bdev->bd_sem);
        lock_kernel();
        if (!--bdev->bd_openers) {
                sync_blockdev(bdev);
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(bd_inode, NULL);
        } else {
                down(&bdev->bd_contains->bd_sem);
                bdev->bd_contains->bd_part_count--;
                up(&bdev->bd_contains->bd_sem);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);

                if (bdev->bd_contains != bdev) {
                        kobject_put(&bdev->bd_part->kobj);
                        bdev->bd_part = NULL;
                }
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains)
                        blkdev_put(bdev->bd_contains);
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        up(&bdev->bd_sem);
        bdput(bdev);
        return ret;
}

EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode * inode, struct file * filp)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        if (bdev->bd_holder == filp)
                bd_release(bdev);
        return blkdev_put(bdev);
}

static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
                                     size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}

static int block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
                       unsigned long arg)
{
        return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}
struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .prepare_write  = blkdev_prepare_write,
        .commit_write   = blkdev_commit_write,
        .writepages     = generic_writepages,
        .direct_IO      = blkdev_direct_IO,
};

struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
        .read           = generic_file_read,
        .write          = blkdev_file_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = blkdev_file_aio_write,
        .mmap           = generic_file_mmap,
        .fsync          = block_fsync,
        .ioctl          = block_ioctl,
        .readv          = generic_file_readv,
        .writev         = generic_file_writev,
        .sendfile       = generic_file_sendfile,
};

EXPORT_SYMBOL(def_blk_fops);
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);
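
/*
 * Illustrative sketch, not part of the original file: ioctl_by_bdev()
 * lets kernel code issue a block-device ioctl with a kernel-space
 * argument - the set_fs(KERNEL_DS) window above is what makes the
 * copy_from_user() inside the handler accept it.  Assumes
 * <linux/hdreg.h> for HDIO_GETGEO; the function name is hypothetical.
 */
static int example_read_geometry(struct block_device *bdev,
                                 struct hd_geometry *geo)
{
        return ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo);
}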
/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:       special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
        struct block_device *bdev;
        struct inode *inode;
        struct nameidata nd;
        int error;

        if (!path || !*path)
                return ERR_PTR(-EINVAL);

        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error)
                return ERR_PTR(error);

        inode = nd.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (nd.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = -ENOMEM;
        bdev = bd_acquire(inode);
        if (!bdev)
                goto fail;
out:
        path_release(&nd);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}
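
/*
 * Illustrative sketch, not part of the original file: lookup_bdev()
 * hands back a referenced block_device; drop the reference with
 * bdput() when done.  The function name is hypothetical.
 */
static dev_t example_devt_of(const char *path)
{
        struct block_device *bdev = lookup_bdev(path);
        dev_t dev;

        if (IS_ERR(bdev))
                return 0;
        dev = bdev->bd_dev;
        bdput(bdev);
        return dev;
}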
/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:       special file representing the block device
 * @flags:      %MS_RDONLY for opening read-only
 * @holder:     owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
        struct block_device *bdev;
        mode_t mode = FMODE_READ;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;
        error = blkdev_get(bdev, mode, 0);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);
/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:       blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
        bd_release(bdev);
        blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);
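
/*
 * Illustrative sketch, not part of the original file: the usual
 * mount-time pairing of open_bdev_excl()/close_bdev_excl(), with the
 * superblock as the exclusion holder.  The function name is
 * hypothetical.
 */
static int example_attach_bdev(struct super_block *sb, const char *path)
{
        struct block_device *bdev = open_bdev_excl(path, 0, sb);

        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        sb->s_bdev = bdev;
        /* ... and at unmount time: close_bdev_excl(sb->s_bdev); */
        return 0;
}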