/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        struct bio *bio;
        atomic_t io_count;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct target_io *)bio->bi_private)->info;

        return NULL;
}
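/*
 * Usage sketch (hypothetical target code, not part of this file): a
 * target's end_io hook can recover the map_info it filled in during
 * ->map():
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *	if (info)
 *		... info->ptr holds whatever the target stored there ...
 */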
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
        struct rw_semaphore lock;
        rwlock_t map_lock;
        atomic_t holders;

        unsigned long flags;

        request_queue_t *queue;
        struct gendisk *disk;

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct bio_list deferred;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;

        /*
         * freeze/thaw support requires holding onto a super block.
         */
        struct super_block *frozen_sb;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;
static int __init local_init(void)
{
        int r;

        dm_set = bioset_create(16, 16, 4);
        if (!dm_set)
                return -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = kmem_cache_create("dm_io",
                                      sizeof(struct dm_io), 0, 0, NULL, NULL);
        if (!_io_cache)
                return -ENOMEM;

        /* allocate a slab for the target ios */
        _tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
                                       0, 0, NULL, NULL);
        if (!_tio_cache) {
                kmem_cache_destroy(_io_cache);
                return -ENOMEM;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0) {
                kmem_cache_destroy(_tio_cache);
                kmem_cache_destroy(_io_cache);
                return r;
        }

        if (!_major)
                _major = r;

        return 0;
}
static void local_exit(void)
{
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);

        if (unregister_blkdev(_major, _name) < 0)
                DMERR("unregister_blkdev failed");

        DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_interface_init,
};

void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_interface_exit,
};
static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}
static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        md = inode->i_bdev->bd_disk->private_data;
        dm_get(md);
        return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        md = inode->i_bdev->bd_disk->private_data;
        dm_put(md);
        return 0;
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->lock);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_write(&md->lock);
                return 1;
        }

        bio_list_add(&md->deferred, bio);

        up_write(&md->lock);
        return 0;               /* deferred successfully */
}
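/*
 * Usage sketch (see dm_request() below for the real caller):
 *
 *	r = queue_io(md, bio);
 *	if (r < 0)
 *		bio_io_error(bio, bio->bi_size);  (queueing failed)
 *	else if (r == 0)
 *		return;	 (deferred: resubmitted on dm_resume())
 *	else
 *		... device no longer blocked, submit the bio directly ...
 */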
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;

        read_lock(&md->map_lock);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock(&md->map_lock);

        return t;
}
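/*
 * Usage sketch (hypothetical caller): every successful dm_get_table()
 * must be paired with a dm_table_put() once the table is no longer used.
 *
 *	struct dm_table *map = dm_get_table(md);
 *	if (map) {
 *		... use map ...
 *		dm_table_put(map);
 *	}
 */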
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
        if (error)
                io->error = error;

        if (atomic_dec_and_test(&io->io_count)) {
                if (atomic_dec_and_test(&io->md->pending))
                        /* nudge anyone waiting on suspend queue */
                        wake_up(&io->md->wait);

                bio_endio(io->bio, io->bio->bi_size, io->error);
                free_io(io->md, io);
        }
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
        int r = 0;
        struct target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (bio->bi_size)
                return 1;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0)
                        error = r;
                else if (r > 0)
                        /* the target wants another shot at the io */
                        return 1;
        }

        free_tio(io->md, tio);
        dec_pending(io, error);
        bio_put(bio);
        return r;
}
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}
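/*
 * Worked example (assuming split_io is a power of two, as the mask
 * arithmetic requires): with ti->split_io = 8 and offset = 5,
 * boundary = ((5 + 8) & ~7) - 5 = 8 - 5 = 3, i.e. at most 3 sectors
 * may be issued before the io would cross the next 8-sector boundary.
 */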
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct target_io *tio)
{
        int r;

        /*
         * Sanity checks.
         */
        BUG_ON(!clone->bi_size);

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        r = ti->type->map(ti, clone, &tio->info);
        if (r > 0)
                /* the bio has been remapped so dispatch it */
                generic_make_request(clone);

        else if (r < 0) {
                /* error the io and bail out */
                struct dm_io *io = tio->io;
                free_tio(tio->io->md, tio);
                dec_pending(io, -EIO);
                bio_put(clone);
        }
}
struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;

        return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len)
{
        struct bio *clone;

        clone = bio_clone(bio, GFP_NOIO);
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
        sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
        struct target_io *tio;

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci->md);
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Create two copy bios to deal with io that has
                 * been split across a target.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;

                clone = split_bvec(bio, ci->sector, ci->idx,
                                   bv->bv_offset, max);
                __map_bio(ti, clone, tio);

                ci->sector += max;
                ci->sector_count -= max;
                ti = dm_table_find_target(ci->map, ci->sector);

                len = to_sector(bv->bv_len) - max;
                clone = split_bvec(bio, ci->sector, ci->idx,
                                   bv->bv_offset + to_bytes(max), len);
                tio = alloc_tio(ci->md);
                tio->io = ci->io;
                tio->ti = ti;
                memset(&tio->info, 0, sizeof(tio->info));
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx++;
        }
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;

        ci.map = dm_get_table(md);
        if (!ci.map) {
                bio_io_error(bio, bio->bi_size);
                return;
        }

        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        ci.idx = bio->bi_idx;

        atomic_inc(&md->pending);
        while (ci.sector_count)
                __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, 0);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/
/*
 * The request function: splits the bio into clones and remaps
 * them to the targets (or defers the bio while suspended).
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
        int r;
        struct mapped_device *md = q->queuedata;

        down_read(&md->lock);

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->lock);

                if (bio_rw(bio) == READA) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                }

                r = queue_io(md, bio);
                if (r < 0) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;

                } else if (r == 0)
                        return 0;       /* deferred successfully */

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->lock);
        }

        __split_bio(md, bio);
        up_read(&md->lock);
        return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
                        sector_t *error_sector)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        int ret = -ENXIO;

        if (map) {
                ret = dm_table_flush_all(md->map);
                dm_table_put(map);
        }

        return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r;
        struct mapped_device *md = (struct mapped_device *) congested_data;
        struct dm_table *map = dm_get_table(md);

        if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
                r = bdi_bits;
        else
                r = dm_table_any_congested(map, bdi_bits);

        dm_table_put(map);
        return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);
static void free_minor(unsigned int minor)
{
        down(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        up(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        down(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r) {
                r = -ENOMEM;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, md, minor, &m);
        if (r)
                goto out;

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
        }

out:
        up(&_minor_lock);
        return r;
}
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
        int r;
        unsigned int m;

        down(&_minor_lock);

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r) {
                r = -ENOMEM;
                goto out;
        }

        r = idr_get_new(&_minor_idr, md, &m);
        if (r)
                goto out;

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        up(&_minor_lock);
        return r;
}
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
        int r;
        struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

        if (!md) {
                DMWARN("unable to allocate device, out of memory.");
                return NULL;
        }

        /* get a minor number for the dev */
        r = persistent ? specific_minor(md, minor) :
                         next_free_minor(md, &minor);
        if (r < 0)
                goto bad0;

        memset(md, 0, sizeof(*md));
        init_rwsem(&md->lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->event_nr, 0);

        md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad1;

        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        md->queue->unplug_fn = dm_unplug_all;
        md->queue->issue_flush_fn = dm_flush_all;

        md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
                                     mempool_free_slab, _io_cache);
        if (!md->io_pool)
                goto bad2;

        md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
                                      mempool_free_slab, _tio_cache);
        if (!md->tio_pool)
                goto bad3;

        md->disk = alloc_disk(1);
        if (!md->disk)
                goto bad4;

        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        add_disk(md->disk);

        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        init_waitqueue_head(&md->eventq);

        return md;

 bad4:
        mempool_destroy(md->tio_pool);
 bad3:
        mempool_destroy(md->io_pool);
 bad2:
        blk_put_queue(md->queue);
 bad1:
        free_minor(minor);
 bad0:
        kfree(md);
        return NULL;
}
static void free_dev(struct mapped_device *md)
{
        free_minor(md->disk->first_minor);
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        del_gendisk(md->disk);
        put_disk(md->disk);
        blk_put_queue(md->queue);
        kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
        struct mapped_device *md = (struct mapped_device *) context;

        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
}
static void __set_size(struct gendisk *disk, sector_t size)
{
        struct block_device *bdev;

        set_capacity(disk, size);
        bdev = bdget_disk(disk, 0);
        if (bdev) {
                down(&bdev->bd_inode->i_sem);
                i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
                up(&bdev->bd_inode->i_sem);
                bdput(bdev);
        }
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
        request_queue_t *q = md->queue;
        sector_t size;

        size = dm_table_get_size(t);
        __set_size(md->disk, size);
        if (size == 0)
                return 0;

        dm_table_get(t);
        write_lock(&md->map_lock);
        md->map = t;
        write_unlock(&md->map_lock);

        dm_table_event_callback(md->map, event_callback, md);
        dm_table_set_restrictions(t, q);
        return 0;
}
static void __unbind(struct mapped_device *md)
{
        struct dm_table *map = md->map;

        if (!map)
                return;

        dm_table_event_callback(map, NULL, NULL);
        write_lock(&md->map_lock);
        md->map = NULL;
        write_unlock(&md->map_lock);

        dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
                      struct mapped_device **result)
{
        struct mapped_device *md;

        md = alloc_dev(minor, persistent);
        if (!md)
                return -ENXIO;

        *result = md;
        return 0;
}

int dm_create(struct mapped_device **result)
{
        return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
        return create_aux(minor, 1, result);
}
void *dm_get_mdptr(dev_t dev)
{
        struct mapped_device *md;
        void *mdptr = NULL;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        down(&_minor_lock);

        md = idr_find(&_minor_idr, minor);

        if (md && (dm_disk(md)->first_minor == minor))
                mdptr = md->interface_ptr;

        up(&_minor_lock);

        return mdptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}
void dm_put(struct mapped_device *md)
{
        struct dm_table *map = dm_get_table(md);

        if (atomic_dec_and_test(&md->holders)) {
                if (!test_bit(DMF_SUSPENDED, &md->flags) && map) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                __unbind(md);
                free_dev(md);
        }

        dm_table_put(map);
}
/*
 * Process the deferred bios.
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
        struct bio *n;

        while (c) {
                n = c->bi_next;
                c->bi_next = NULL;
                __split_bio(md, c);
                c = n;
        }
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        int r;

        down_write(&md->lock);

        /* device must be suspended */
        if (!test_bit(DMF_SUSPENDED, &md->flags)) {
                up_write(&md->lock);
                return -EPERM;
        }

        __unbind(md);
        r = __bind(md, table);
        up_write(&md->lock);

        return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
        struct block_device *bdev;

        if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
                return 0;

        bdev = bdget_disk(md->disk, 0);
        if (!bdev) {
                DMWARN("bdget failed in __lock_fs");
                return -ENOMEM;
        }

        WARN_ON(md->frozen_sb);
        md->frozen_sb = freeze_bdev(bdev);
        /* don't bdput right now, we don't want the bdev
         * to go away while it is locked.  We'll bdput
         * in __unlock_fs.
         */
        return 0;
}
static int __unlock_fs(struct mapped_device *md)
{
        struct block_device *bdev;

        if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
                return 0;

        bdev = bdget_disk(md->disk, 0);
        if (!bdev) {
                DMWARN("bdget failed in __unlock_fs");
                return -ENOMEM;
        }

        thaw_bdev(bdev, md->frozen_sb);
        md->frozen_sb = NULL;

        /* drop both our reference and the one __lock_fs kept */
        bdput(bdev);
        bdput(bdev);

        return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_swap_table(), dm_suspend() must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
        struct dm_table *map;
        DECLARE_WAITQUEUE(wait, current);

        /* Flush I/O to the device. */
        down_read(&md->lock);
        if (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->lock);
                return -EINVAL;
        }

        map = dm_get_table(md);
        if (map)
                dm_table_presuspend_targets(map);
        __lock_fs(md);

        up_read(&md->lock);

        /*
         * First we set the BLOCK_IO flag so no more ios will be
         * mapped.
         */
        down_write(&md->lock);
        if (test_bit(DMF_BLOCK_IO, &md->flags)) {
                /*
                 * If we get here we know another thread is
                 * trying to suspend as well, so we leave the fs
                 * locked for this thread.
                 */
                up_write(&md->lock);
                dm_table_put(map);
                return -EINVAL;
        }

        set_bit(DMF_BLOCK_IO, &md->flags);
        add_wait_queue(&md->wait, &wait);
        up_write(&md->lock);

        /* unplug so the already mapped ios reach the device */
        if (map)
                dm_table_unplug_all(map);
        dm_table_put(map);

        /*
         * Then we wait for the already mapped ios to
         * complete.
         */
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!atomic_read(&md->pending) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        down_write(&md->lock);
        remove_wait_queue(&md->wait, &wait);

        /* were we interrupted? */
        if (atomic_read(&md->pending)) {
                __unlock_fs(md);
                clear_bit(DMF_BLOCK_IO, &md->flags);
                up_write(&md->lock);
                return -EINTR;
        }

        set_bit(DMF_SUSPENDED, &md->flags);

        map = dm_get_table(md);
        if (map)
                dm_table_postsuspend_targets(map);
        dm_table_put(map);
        up_write(&md->lock);

        return 0;
}
int dm_resume(struct mapped_device *md)
{
        struct bio *def;
        struct dm_table *map = dm_get_table(md);

        down_write(&md->lock);
        if (!map ||
            !test_bit(DMF_SUSPENDED, &md->flags) ||
            !dm_table_get_size(map)) {
                up_write(&md->lock);
                dm_table_put(map);
                return -EINVAL;
        }

        dm_table_resume_targets(map);
        clear_bit(DMF_SUSPENDED, &md->flags);
        clear_bit(DMF_BLOCK_IO, &md->flags);

        def = bio_list_get(&md->deferred);
        __flush_deferred_io(md, def);
        up_write(&md->lock);
        __unlock_fs(md);
        dm_table_unplug_all(map);
        dm_table_put(map);

        return 0;
}
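/*
 * Typical table-swap sequence (sketch of a hypothetical caller such as
 * the ioctl interface):
 *
 *	r = dm_suspend(md);		(defer new io, drain in-flight io)
 *	if (!r)
 *		r = dm_swap_table(md, new_table);
 *	if (!r)
 *		r = dm_resume(md);	(replay deferred io on the new map)
 */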
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}
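/*
 * Usage sketch (hypothetical caller): snapshot the event counter first,
 * then sleep until it moves.
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, ev))
 *		... interrupted by a signal before an event arrived ...
 */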
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
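/*
 * Loading sketch (module name assumed to be dm-mod): the major number
 * can be fixed at load time, e.g.
 *
 *	modprobe dm-mod major=240
 *
 * With the default major=0 the block layer assigns one dynamically
 * (register_blkdev() returns the allocated number).
 */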