/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
/*
 * inform the user of the raid configuration
 */
static void dump_zones(mddev_t *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;

	printk(KERN_INFO "******* %s configuration *********\n",
		mdname(mddev));
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk("%s/",
				bdevname(conf->devlist[j*mddev->raid_disks
							+ k]->bdev, b));
		printk("]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "        zone offset=%llukb "
				"device offset=%llukb size=%llukb\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "**********************************\n\n");
}
static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j, err;
	sector_t curr_zone_end, sectors;
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev, b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev, b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev, b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0:   END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
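	/* Illustrative example (not from the original source): members of
	 * 100, 100 and 200 sectors have two distinct sizes, so
	 * nr_strip_zones ends up as 2.  Zone 0 stripes the first 100
	 * sectors of all three members; zone 1 stripes the remaining 100
	 * sectors of the largest member alone.
	 */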
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;
	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->devlist[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_INFO " nope.\n");
				continue;
			}
			printk(KERN_INFO " contained as device %d\n", c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO "  (%llu) is smallest!.\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)smallest->sectors);
	}
	mddev->queue->unplug_fn = raid0_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "%s chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}
	printk(KERN_INFO "raid0: done.\n");
	mddev->private = conf;
	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return err;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
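	/* Illustrative arithmetic (not from the original source): with
	 * chunk_sectors = 128 (64KiB chunks), a bio already holding 4
	 * sectors at offset 120 within its chunk may accept
	 * (128 - (120 + 4)) << 9 = 2048 more bytes before it would
	 * straddle a chunk boundary.
	 */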
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}
static int raid0_run(mddev_t *mddev)
{
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0: chunk size must be set.\n");
		return -EINVAL;
	}
	blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	ret = create_strip_zones(mddev);
	if (ret < 0)
		return ret;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
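	/* Illustrative sizing (not from the original source): 4 members
	 * with 64KiB chunks give stripe = 4 * 65536 / 4096 = 64 pages,
	 * so ra_pages is raised to at least 128 pages (512KiB). */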
	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	return 0;
}
static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
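/* Illustrative lookup (not from the original source): with zones ending
 * at 300 and 400, a request for array sector 350 lands in zone 1 and
 * *sectorp becomes 350 - 300 = 50, i.e. an offset within that zone. */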
/*
 * remaps the bio to the target device. we separate two flows.
 * power 2 flow and a general flow for the sake of performance
 */
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	raid0_conf_t *conf = mddev->private;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
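/* Illustrative mapping (not from the original source): in zone 0 with
 * nb_dev = 2 and chunk_sects = 128, array sector 300 sits in chunk 2
 * (sect_in_chunk = 300 & 127 = 44), so it maps to device 2 % 2 = 0 at
 * device chunk 300 / 256 = 1, giving *sector_offset = 128 + 44 = 172. */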
/*
 * Is the io distributed over 1 or more chunks?
 */
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}
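/* Illustrative check (not from the original source): with chunk_sects =
 * 128, a 16-sector bio at sector 100 fits ((100 & 127) + 16 = 116 <= 128),
 * while the same bio at sector 120 straddles a boundary and must be split. */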
static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();
	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
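	/* Example of the split above (illustrative, not from the original
	 * source): with chunk_sects = 128, a 16-sector bio starting at
	 * sector 120 straddles a chunk boundary, so bio_split() carves off
	 * the first 128 - (120 & 127) = 8 sectors and each half then maps
	 * to a single device. */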
	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;

	sector_t zone_size;
	sector_t zone_start = 0;

	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->devlist[j*mddev->raid_disks + k]
						->bdev, b));

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}
static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};
static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");