/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/raid/raid0.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY
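
/*
 * Unplug every member queue: the md device has no queue of its own to
 * drain, so an unplug request is simply propagated to each underlying
 * block device in zone 0 (which, by construction, holds all members).
 */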
static void raid0_unplug(struct request_queue *q)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i;

        for (i = 0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

                blk_unplug(r_queue);
        }
}
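
/*
 * The array counts as congested if any member device is congested for
 * the queried direction(s): writeback should back off as soon as one
 * stripe member cannot keep up.
 */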
static int raid0_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i, ret = 0;

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}
static int create_strip_zones (mddev_t *mddev)
{
        int i, c, j;
        sector_t current_offset, curr_zone_offset;
        sector_t min_spacing;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
        struct list_head *tmp1, *tmp2;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];

        /*
         * The number of 'same size groups'
         */
        conf->nr_strip_zones = 0;
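
        /*
         * Zone counting, illustrated (example values, not from the code):
         * with member sizes 80G, 80G and 120G the loop below finds two
         * distinct sizes, so nr_strip_zones becomes 2 -- zone 0 striped
         * over all three disks up to 80G, zone 1 over the 40G tail of
         * the 120G disk.
         */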
        rdev_for_each(rdev1, tmp1, mddev) {
                printk("raid0: looking at %s\n",
                        bdevname(rdev1->bdev,b));
                c = 0;
                rdev_for_each(rdev2, tmp2, mddev) {
                        printk("raid0:   comparing %s(%llu)",
                               bdevname(rdev1->bdev,b),
                               (unsigned long long)rdev1->size);
                        printk(" with %s(%llu)\n",
                               bdevname(rdev2->bdev,b),
                               (unsigned long long)rdev2->size);
                        if (rdev2 == rdev1) {
                                printk("raid0:   END\n");
                                break;
                        }
                        if (rdev2->size == rdev1->size) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                printk("raid0:   EQUAL\n");
                                c = 1;
                                break;
                        }
                        printk("raid0:   NOT EQUAL\n");
                }
                if (!c) {
                        printk("raid0:   ==> UNIQUE\n");
                        conf->nr_strip_zones++;
                        printk("raid0: %d zones\n", conf->nr_strip_zones);
                }
        }
        printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                return 1;
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                return 1;
        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        zone->dev = conf->devlist;
        rdev_for_each(rdev1, tmp1, mddev) {
                int j = rdev1->raid_disk;

                if (j < 0 || j >= mddev->raid_disks) {
                        printk("raid0: bad disk number %d - aborting!\n", j);
                        goto abort;
                }
                if (zone->dev[j]) {
                        printk("raid0: multiple devices for %d - aborting!\n",
                                j);
                        goto abort;
                }
                zone->dev[j] = rdev1;

                blk_queue_stack_limits(mddev->queue,
                                       rdev1->bdev->bd_disk->queue);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
                 */
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                if (!smallest || (rdev1->size < smallest->size))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk("raid0: too few disks (%d of %d) - aborting!\n",
                        cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->size = smallest->size * cnt;
        zone->zone_offset = 0;

        current_offset = smallest->size;
        curr_zone_offset = zone->size;
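
        /*
         * Bookkeeping for the zones that follow: current_offset is the
         * per-device offset (in 1K blocks, the unit rdev->size uses here)
         * at which the next zone starts on each remaining member, and
         * curr_zone_offset is the cumulative size of all zones assigned
         * so far, i.e. the array offset of the next zone.
         */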
        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
                zone = conf->strip_zone + i;
                zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

                printk("raid0: zone %d\n", i);
                zone->dev_offset = current_offset;
                smallest = NULL;
                c = 0;

                for (j=0; j<cnt; j++) {
                        char b[BDEVNAME_SIZE];
                        rdev = conf->strip_zone[0].dev[j];
                        printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
                        if (rdev->size > current_offset) {
                                printk(" contained as device %d\n", c);
                                zone->dev[c] = rdev;
                                c++;
                                if (!smallest || (rdev->size < smallest->size)) {
                                        smallest = rdev;
                                        printk("  (%llu) is smallest!\n",
                                                (unsigned long long)rdev->size);
                                }
                        } else
                                printk(" nope.\n");
                }

                zone->nb_dev = c;
                zone->size = (smallest->size - current_offset) * c;
                printk("raid0: zone->nb_dev: %d, size: %llu\n",
                        zone->nb_dev, (unsigned long long)zone->size);

                zone->zone_offset = curr_zone_offset;
                curr_zone_offset += zone->size;

                current_offset = smallest->size;
                printk("raid0: current zone offset: %llu\n",
                        (unsigned long long)current_offset);
        }
        /* Now find appropriate hash spacing.
         * We want a number which causes most hash entries to cover
         * at most two strips, but the hash table must be at most
         * 1 PAGE.  We choose the smallest strip, or contiguous collection
         * of strips, that has big enough size.  We never consider the last
         * strip though as its size has no bearing on the efficacy of the
         * hash table.
         */
        conf->hash_spacing = curr_zone_offset;
        min_spacing = curr_zone_offset;
        sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
        for (i=0; i < conf->nr_strip_zones-1; i++) {
                sector_t sz = 0;
                for (j=i; j<conf->nr_strip_zones-1 &&
                             sz < min_spacing ; j++)
                        sz += conf->strip_zone[j].size;
                if (sz >= min_spacing && sz < conf->hash_spacing)
                        conf->hash_spacing = sz;
        }
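
        /*
         * Example (illustrative numbers): a 1T array with 4K pages and
         * 8-byte pointers gives min_spacing = 1T/512 = 2G, so any run of
         * consecutive zones totalling at least 2G can define the spacing;
         * the smallest such run wins, keeping the table within one page
         * while most entries still span only a zone or two.
         */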
        mddev->queue->unplug_fn = raid0_unplug;

        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        printk("raid0: done.\n");
        return 0;
 abort:
        return 1;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bio: the buffer head that's been built up so far
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
        unsigned int bio_sectors = bio->bi_size >> 9;

        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
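        /*
         * max is the number of bytes left in the bio's chunk after the
         * bio's current end.  Example (illustrative numbers): with 64K
         * chunks chunk_sectors is 128; a bio covering sectors 100-119
         * of its chunk has 128 - (100 + 20) = 8 sectors left, so max is
         * 8 << 9 = 4096 bytes.
         */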
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}
static int raid0_run (mddev_t *mddev)
{
        unsigned cur=0, i=0, nb_zone;
        s64 size;
        raid0_conf_t *conf;
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        if (mddev->chunk_size == 0) {
                printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
                return -EINVAL;
        }
        printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
               mdname(mddev),
               mddev->chunk_size >> 9,
               (mddev->chunk_size>>1)-1);
        blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
        blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);

        conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
        if (!conf)
                goto out;
        mddev->private = (void *)conf;

        conf->strip_zone = NULL;
        conf->devlist = NULL;
        if (create_strip_zones (mddev))
                goto out_free_conf;
        /* calculate array device size */
        mddev->array_size = 0;
        rdev_for_each(rdev, tmp, mddev)
                mddev->array_size += rdev->size;

        printk("raid0 : md_size is %llu blocks.\n",
                (unsigned long long)mddev->array_size);
        printk("raid0 : conf->hash_spacing is %llu blocks.\n",
                (unsigned long long)conf->hash_spacing);
        {
                sector_t s = mddev->array_size;
                sector_t space = conf->hash_spacing;
                int round;
                conf->preshift = 0;
                if (sizeof(sector_t) > sizeof(u32)) {
                        /* shift down space and s so that sector_div will work */
                        while (space > (sector_t) (~(u32)0)) {
                                s >>= 1;
                                space >>= 1;
                                s += 1; /* force round-up */
                                conf->preshift++;
                        }
                }
                round = sector_div(s, (u32)space) ? 1 : 0;
                nb_zone = s + round;
        }
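
        /*
         * nb_zone is now ceil(array_size / hash_spacing).  sector_div in
         * this form takes a u32 divisor, so on 64-bit sector_t both
         * values are pre-shifted (recorded in conf->preshift) until the
         * divisor fits, rounding s up at each step so the hash table is
         * never under-sized.
         */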
320 printk("raid0 : nb_zone is %d.\n", nb_zone);
322 printk("raid0 : Allocating %Zd bytes for hash.\n",
323 nb_zone*sizeof(struct strip_zone*));
324 conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
325 if (!conf->hash_table)
326 goto out_free_conf;
        size = conf->strip_zone[cur].size;

        conf->hash_table[0] = conf->strip_zone + cur;
        for (i=1; i< nb_zone; i++) {
                while (size <= conf->hash_spacing) {
                        cur++;
                        size += conf->strip_zone[cur].size;
                }
                size -= conf->hash_spacing;
                conf->hash_table[i] = conf->strip_zone + cur;
        }
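
        /*
         * Invariant of the loop above: hash_table[i] points at the zone
         * containing array offset i * hash_spacing, so a lookup divides
         * the target block by hash_spacing and then walks forward over
         * at most a zone or two (see raid0_make_request).
         */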
        if (conf->preshift) {
                conf->hash_spacing >>= conf->preshift;
                /* round hash_spacing up so when we divide by it, we
                 * err on the side of too-low, which is safest
                 */
                conf->hash_spacing++;
        }
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe. i.e. number of devices
         * multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
        {
                int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        return 0;
out_free_conf:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
out:
        return -ENOMEM;
}
static int raid0_stop (mddev_t *mddev)
{
        raid0_conf_t *conf = mddev_to_conf(mddev);

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->hash_table);
        conf->hash_table = NULL;
        kfree(conf->strip_zone);
        conf->strip_zone = NULL;
        kfree(conf);
        mddev->private = NULL;

        return 0;
}
static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;
        unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
        sector_t chunk;
        sector_t block, rsect;
        const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        disk_stat_inc(mddev->gendisk, ios[rw]);
        disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        chunk_size = mddev->chunk_size >> 10;
        chunk_sects = mddev->chunk_size >> 9;
        chunksize_bits = ffz(~chunk_size);
        block = bio->bi_sector >> 1;
        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);

                bio_pair_release(bp);
                return 0;
        }
        {
                sector_t x = block >> conf->preshift;
                sector_div(x, (u32)conf->hash_spacing);
                zone = conf->hash_table[x];
        }
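
        /*
         * The hash gives the zone at or just before the target block;
         * hash_spacing was rounded so the estimate errs low, and the
         * walk below steps forward over at most a couple of zones.
         */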
        while (block >= (zone->zone_offset + zone->size))
                zone++;

        sect_in_chunk = bio->bi_sector & ((chunk_size<<1) - 1);
        {
                sector_t x = (block - zone->zone_offset) >> chunksize_bits;

                sector_div(x, zone->nb_dev);
                chunk = x;

                x = block >> chunksize_bits;
                tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
        }
        rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
                + sect_in_chunk;
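
        /*
         * Worked example (illustrative numbers): 2 disks, 64K chunks
         * (chunk_size = 64 in 1K units, chunksize_bits = 6), zone 0 at
         * dev_offset 0.  A bio at sector 300 gives block = 150 and
         * sect_in_chunk = 300 & 127 = 44; the array chunk index is
         * 150 >> 6 = 2, which lands on disk 2 % 2 = 0 as that disk's
         * chunk 2 / 2 = 1, so rsect = ((1 << 6) << 1) + 44 = 172.
         */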
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = rsect + tmp_dev->data_offset;

        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
        return 1;
bad_map:
        printk("raid0_make_request bug: can't convert block across chunks"
                " or bigger than %dk %llu %d\n", chunk_size,
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return 0;
}
static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev_to_conf(mddev);

        h = 0;
        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, "      z%d", j);
                if (conf->hash_table[h] == conf->strip_zone+j)
                        seq_printf(seq, "(h%d)", h++);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf(seq, "%s/", bdevname(
                                conf->strip_zone[j].dev[k]->bdev,b));

                /* zone fields are sector_t, so print as %llu, not %d */
                seq_printf(seq, "] zo=%llu do=%llu s=%llu\n",
                        (unsigned long long)conf->strip_zone[j].zone_offset,
                        (unsigned long long)conf->strip_zone[j].dev_offset,
                        (unsigned long long)conf->strip_zone[j].size);
        }
#endif
        seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
        return;
}
static struct mdk_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
};
static int __init raid0_init (void)
{
        return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
        unregister_md_personality (&raid0_personality);
}
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");