/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t curr_zone_end;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0: END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
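	/*
	 * Worked example (hypothetical sizes, not taken from the code
	 * above): three members of 100GiB, 100GiB and 200GiB form two
	 * "same size groups", so nr_strip_zones == 2: zone 0 stripes
	 * across all three devices, zone 1 only across the remainder of
	 * the 200GiB device.
	 */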
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;
	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->sectors = smallest->sectors * cnt;
	zone->zone_end = zone->sectors;

	curr_zone_end = zone->sectors;
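	/*
	 * Zone 0 geometry, continuing the hypothetical example above:
	 * with three members and the smallest device contributing
	 * 100GiB, zone 0 spans 300GiB, and zone_end marks the array
	 * offset where striping over all three devices stops.
	 */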

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_INFO " nope.\n");
				continue;
			}
			printk(KERN_INFO " contained as device %d\n", c);
			zone->dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO " (%llu) is smallest!.\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		curr_zone_end += zone->sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)smallest->sectors);
	}
	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->spacing = curr_zone_end;
	min_spacing = curr_zone_end;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t s = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
				s < min_spacing; j++)
			s += conf->strip_zone[j].sectors;
		if (s >= min_spacing && s < conf->spacing)
			conf->spacing = s;
	}
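	/*
	 * Worked example (hypothetical numbers): with 4KiB pages and
	 * 8-byte pointers, min_spacing is the total array size divided
	 * by 512 table slots.  The loop then picks the smallest run of
	 * consecutive strips (excluding the last) whose combined size
	 * reaches min_spacing, so each hash entry spans at most two
	 * strips.
	 */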

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
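	/*
	 * Worked example (hypothetical numbers): with 64KiB chunks,
	 * chunk_sectors is 128.  A bio already 120 sectors long that
	 * starts at offset 0 within a chunk may grow by
	 * (128 - (0 + 120)) << 9 = 4096 more bytes before it would
	 * cross a chunk boundary.
	 */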
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 sectors;
	raid0_conf_t *conf;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
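	/*
	 * Example of the limits set above (hypothetical chunk size):
	 * with a 64KiB chunk, max_sectors becomes 128 (64KiB in
	 * 512-byte sectors) and the segment boundary mask becomes
	 * 32767 (0x7fff), so no request or segment straddles a chunk.
	 */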
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
		(unsigned long long)conf->spacing);
	{
		sector_t s = raid0_size(mddev, 0, 0);
		sector_t space = conf->spacing;
		int round;
		conf->sector_shift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/*shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->sector_shift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
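	/*
	 * nb_zone is effectively ceil(array size / spacing): sector_div
	 * leaves the quotient in s and returns the remainder, and round
	 * adds one entry whenever the division was inexact.
	 */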
	sectors = conf->strip_zone[cur].sectors;

	for (i=1; i< nb_zone; i++) {
		while (sectors <= conf->spacing) {
			cur++;
			sectors += conf->strip_zone[cur].sectors;
		}
		sectors -= conf->spacing;
	}
	if (conf->sector_shift) {
		conf->spacing >>= conf->sector_shift;
		/* round spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants. We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

/* Find the zone which holds a particular offset */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
		sector_t sector)
{
	int i;
	struct strip_zone *z = conf->strip_zone;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end)
			return z + i;
	BUG();
}

static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
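	/* ffz(~x) computes log2(x) for power-of-2 x, so chunksect_bits
	 * is the shift that converts a sector count to a chunk number. */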
	sector = bio->bi_sector;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
	zone = find_zone(conf, sector);
	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
	{
		sector_t x = (zone->sectors + sector - zone->zone_end)
				>> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
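	/*
	 * Worked example (hypothetical layout): in a single-zone array
	 * of 2 disks with 64KiB chunks (chunk_sects = 128,
	 * chunksect_bits = 7), array sector 300 lies in array chunk 2,
	 * which is chunk 1 on device 0, so rsect =
	 * (1 << 7) + 0 + 44 = 172 on that device.
	 */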

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] ze=%d ds=%d s=%d\n",
				conf->strip_zone[j].zone_end,
				conf->strip_zone[j].dev_start,
				conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");