/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);
static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);

                retval = (sz >> sizebits);
        }
        return retval;
}
/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
        invalidate_bdev(bdev, 1);
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_hardsect_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current one */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
                bdev->bd_block_size = size;
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
        int bits = 9; /* 2^9 = 512 */

        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        for (size >>= 10; size; size >>= 1)
                ++bits;
        sb->s_blocksize_bits = bits;
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_hardsect_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
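
/*
 * Illustrative usage (not part of the original file): a filesystem's
 * fill_super() typically picks its working block size this way; "sb"
 * and the failure label are hypothetical.  A zero return means
 * set_blocksize() rejected the size (e.g. the device's hardware sector
 * size is larger than PAGE_SIZE):
 *
 *	int blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
 *	if (!blocksize)
 *		goto failed_mount;
 */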
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh, int create)
{
        if ((iblock + max_blocks) > max_block(I_BDEV(inode)))
                return -EIO;

        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        set_buffer_mapped(bh);
        return 0;
}
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
                                iov, offset, nr_segs, blkdev_get_blocks, NULL);
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_commit_write(page, from, to);
}
/*
 * private llseek:
 * for a block special file file->f_dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode = file->f_mapping->host;
        loff_t size;
        loff_t retval;

        down(&bd_inode->i_sem);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                }
                retval = offset;
        }
        up(&bd_inode->i_sem);
        return retval;
}
/*
 * Filp is never NULL; the only case when ->fsync() is called with
 * NULL first argument is nfsd_sync_dir() and that's not a directory.
 */
static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
}
/*
 * pseudo-fs
 */

static spinlock_t bdev_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
static kmem_cache_t *bdev_cachep;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        struct bdev_inode *bdi = BDEV_I(inode);

        bdi->bdev.bd_inode_backing_dev_info = NULL;
        kmem_cache_free(bdev_cachep, bdi);
}
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                memset(bdev, 0, sizeof(*bdev));
                sema_init(&bdev->bd_sem, 1);
                sema_init(&bdev->bd_mount_sem, 1);
                INIT_LIST_HEAD(&bdev->bd_inodes);
                INIT_LIST_HEAD(&bdev->bd_list);
                inode_init_once(&ei->vfs_inode);
        }
}
static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;
        spin_lock(&bdev_lock);
        while ((p = bdev->bd_inodes.next) != &bdev->bd_inodes) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}
static struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};

static struct super_block *bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *bd_mnt;
struct super_block *blockdev_superblock;
void __init bdev_cache_init(void)
{
        int err;
        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
                        init_once, NULL);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        err = PTR_ERR(bd_mnt);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}
/*
 * Most likely a _very_ bad hash - but then it's hardly critical for a
 * small /dev, and it can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}
static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);
long nr_blockdev_pages(void)
{
        struct list_head *p;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each(p, &all_bdevs) {
                struct block_device *bdev;
                bdev = list_entry(p, struct block_device, bd_list);
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
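
/*
 * Illustrative sketch (not part of the original file): a reference
 * obtained with bdget() is held via the backing inode and must be
 * balanced by bdput().  "dev" is a hypothetical device number:
 *
 *	struct block_device *bdev = bdget(dev);
 *	if (bdev) {
 *		... inspect or open bdev ...
 *		bdput(bdev);
 *	}
 */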
static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;
        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev && igrab(bdev->bd_inode)) {
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);
        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (inode->i_bdev)
                        __bd_forget(inode);
                inode->i_bdev = bdev;
                inode->i_mapping = bdev->bd_inode->i_mapping;
                list_add(&inode->i_devices, &bdev->bd_inodes);
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        spin_lock(&bdev_lock);
        if (inode->i_bdev)
                __bd_forget(inode);
        spin_unlock(&bdev_lock);
}
int bd_claim(struct block_device *bdev, void *holder)
{
        int res;
        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);
void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);
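
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * claims a device it has opened and releases it when done.  bd_claim()
 * returns -EBUSY if somebody else already holds the device; "holder" is
 * any unique cookie, commonly the claiming object itself:
 *
 *	err = bd_claim(bdev, holder);
 *	if (err)
 *		goto out_put;
 *	... bdev is now held against other exclusive claimants ...
 *	bd_release(bdev);
 */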
/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
        if (bdev)
                err = blkdev_get(bdev, mode, flags);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);
/*
 * This routine checks whether the removable medium has been changed,
 * and invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        struct block_device_operations *bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        if (__invalidate_device(bdev, 0))
                printk("VFS: busy inodes on changed media.\n");

        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        if (bdev->bd_disk->minors > 1)
                bdev->bd_invalidated = 1;
        return 1;
}

EXPORT_SYMBOL(check_disk_change);
void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_hardsect_size(bdev);

        bdev->bd_inode->i_size = size;
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
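
/*
 * Worked example (not part of the original file): the loop above grows
 * bsize to the largest power of two, up to PAGE_CACHE_SIZE, that evenly
 * divides the device size.  With 512-byte hardware sectors and
 * size = 4096000 bytes (a multiple of 4096), bsize doubles
 * 512 -> 1024 -> 2048 -> 4096, so bd_block_size becomes 4096.  With
 * size = 4096512 bytes (8001 sectors), the 512 bit is set in size, the
 * loop breaks immediately, and bd_block_size stays 512.
 */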
static int do_open(struct block_device *bdev, struct file *file)
{
        struct module *owner = NULL;
        struct gendisk *disk;
        int ret = -ENXIO;
        int part;

        file->f_mapping = bdev->bd_inode->i_mapping;
        lock_kernel();
        disk = get_gendisk(bdev->bd_dev, &part);
        if (!disk) {
                unlock_kernel();
                bdput(bdev);
                return ret;
        }
        owner = disk->fops->owner;

        down(&bdev->bd_sem);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!part) {
                        struct backing_dev_info *bdi;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out_first;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct hd_struct *p;
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_first;
                        ret = blkdev_get(whole, file->f_mode, file->f_flags);
                        if (ret)
                                goto out_first;
                        bdev->bd_contains = whole;
                        down(&whole->bd_sem);
                        whole->bd_part_count++;
                        p = disk->part[part - 1];
                        bdev->bd_inode->i_data.backing_dev_info =
                                whole->bd_inode->i_data.backing_dev_info;
                        if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
                                whole->bd_part_count--;
                                up(&whole->bd_sem);
                                ret = -ENXIO;
                                goto out_first;
                        }
                        kobject_get(&p->kobj);
                        bdev->bd_part = p;
                        bd_set_size(bdev, (loff_t) p->nr_sects << 9);
                        up(&whole->bd_sem);
                }
        } else {
                put_disk(disk);
                module_put(owner);
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                } else {
                        down(&bdev->bd_contains->bd_sem);
                        bdev->bd_contains->bd_part_count++;
                        up(&bdev->bd_contains->bd_sem);
                }
        }
        bdev->bd_openers++;
        up(&bdev->bd_sem);
        unlock_kernel();
        return 0;

out_first:
        bdev->bd_disk = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                blkdev_put(bdev->bd_contains);
        bdev->bd_contains = NULL;
        put_disk(disk);
        module_put(owner);
out:
        up(&bdev->bd_sem);
        unlock_kernel();
        if (ret)
                bdput(bdev);
        return ret;
}
int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
        /*
         * This crockload is due to bad choice of ->open() type.
         * It will go away.
         * For now, block device ->open() routine must _not_
         * examine anything in 'inode' argument except ->i_rdev.
         */
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        fake_file.f_mode = mode;
        fake_file.f_flags = flags;
        fake_file.f_dentry = &fake_dentry;
        fake_dentry.d_inode = bdev->bd_inode;

        return do_open(bdev, &fake_file);
}

EXPORT_SYMBOL(blkdev_get);
int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        bdev = bd_acquire(inode);

        res = do_open(bdev, filp);
        if (res)
                return res;

        if (!(filp->f_flags & O_EXCL))
                return 0;

        if (!(res = bd_claim(bdev, filp)))
                return 0;

        blkdev_put(bdev);
        return res;
}

EXPORT_SYMBOL(blkdev_open);
int blkdev_put(struct block_device *bdev)
{
        int ret = 0;
        struct inode *bd_inode = bdev->bd_inode;
        struct gendisk *disk = bdev->bd_disk;

        down(&bdev->bd_sem);
        lock_kernel();
        if (!--bdev->bd_openers) {
                sync_blockdev(bdev);
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(bd_inode, NULL);
        } else {
                down(&bdev->bd_contains->bd_sem);
                bdev->bd_contains->bd_part_count--;
                up(&bdev->bd_contains->bd_sem);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);

                if (bdev->bd_contains != bdev) {
                        kobject_put(&bdev->bd_part->kobj);
                        bdev->bd_part = NULL;
                }
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains) {
                        blkdev_put(bdev->bd_contains);
                }
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        up(&bdev->bd_sem);
        bdput(bdev);
        return ret;
}

EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        if (bdev->bd_holder == filp)
                bd_release(bdev);
        return blkdev_put(bdev);
}
static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
                                     size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}

static int block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
                        unsigned long arg)
{
        return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}
struct address_space_operations def_blk_aops = {
        .readpage = blkdev_readpage,
        .writepage = blkdev_writepage,
        .sync_page = block_sync_page,
        .prepare_write = blkdev_prepare_write,
        .commit_write = blkdev_commit_write,
        .writepages = generic_writepages,
        .direct_IO = blkdev_direct_IO,
};

struct file_operations def_blk_fops = {
        .open = blkdev_open,
        .release = blkdev_close,
        .llseek = block_llseek,
        .read = generic_file_read,
        .write = blkdev_file_write,
        .aio_read = generic_file_aio_read,
        .aio_write = blkdev_file_aio_write,
        .mmap = generic_file_mmap,
        .fsync = block_fsync,
        .ioctl = block_ioctl,
        .readv = generic_file_readv,
        .writev = generic_file_write_nolock,
        .sendfile = generic_file_sendfile,
};

EXPORT_SYMBOL(def_blk_fops);
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);
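
/*
 * Illustrative usage (not part of the original file): thanks to the
 * set_fs(KERNEL_DS) dance above, the ioctl argument may point into
 * kernel memory.  A filesystem probing disk geometry might do the
 * following, where "geo" is a hypothetical local variable:
 *
 *	struct hd_geometry geo;
 *	if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)&geo) == 0)
 *		... use geo.heads and geo.sectors ...
 */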
/**
 * lookup_bdev - lookup a struct block_device by name
 *
 * @path:	special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
        struct block_device *bdev;
        struct inode *inode;
        struct nameidata nd;
        int error;

        if (!path || !*path)
                return ERR_PTR(-EINVAL);

        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error)
                return ERR_PTR(error);

        inode = nd.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (nd.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = -ENOMEM;
        bdev = bd_acquire(inode);
        if (!bdev)
                goto fail;
out:
        path_release(&nd);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}
/**
 * open_bdev_excl - open a block device by name and set it up for use
 *
 * @path:	special file representing the block device
 * @flags:	%MS_RDONLY for opening read-only
 * @holder:	owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
        struct block_device *bdev;
        mode_t mode = FMODE_READ;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;
        error = blkdev_get(bdev, mode, 0);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);
/**
 * close_bdev_excl - release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:	blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
        bd_release(bdev);
        blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);
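
/*
 * Illustrative sketch (not part of the original file): the usual callers
 * of this pair are mount helpers that need exclusive access to the
 * backing device.  "path" and "holder" are hypothetical:
 *
 *	struct block_device *bdev = open_bdev_excl(path, MS_RDONLY, holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... read the superblock from bdev ...
 *	close_bdev_excl(bdev);
 */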