/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/raid/raid0.h>
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j;
	sector_t current_start, curr_zone_start;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev, b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev, b),
			       (unsigned long long)rdev1->size);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev, b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0:   END\n");
				break;
			}
			if (rdev2->size == rdev1->size) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
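	/*
	 * Editor's note: each distinct member size contributes one zone, so
	 * e.g. members of 100 GiB, 100 GiB and 200 GiB yield two strip zones.
	 */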
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;
	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->sectors = smallest->size * cnt * 2;
	zone->zone_start = 0;

	current_start = smallest->size * 2;
	curr_zone_start = zone->sectors;
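	/*
	 * Editor's note: rdev->size is in KiB, so the "* 2" factors above
	 * convert to 512-byte sectors.  Zone 0 always stripes across all
	 * cnt members and ends where the smallest member ends.
	 */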
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = current_start;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->size > current_start / 2) {
				printk(KERN_INFO " contained as device %d\n",
					c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk(KERN_INFO "  (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(KERN_INFO " nope.\n");
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->size * 2 - current_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		zone->zone_start = curr_zone_start;
		curr_zone_start += zone->sectors;

		current_start = smallest->size * 2;
		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)current_start);
	}
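	/*
	 * Illustrative layout (editor's example): with two 100 GiB members
	 * and one 200 GiB member, zone 0 stripes the first 100 GiB of all
	 * three devices, and zone 1 is the remaining 100 GiB of the large
	 * device alone (nb_dev = 1), starting at dev_start = 100 GiB.
	 */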
	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->spacing = curr_zone_start;
	min_spacing = curr_zone_start;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i = 0; i < conf->nr_strip_zones-1; i++) {
		sector_t s = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
				s < min_spacing; j++)
			s += conf->strip_zone[j].sectors;
		if (s >= min_spacing && s < conf->spacing)
			conf->spacing = s;
	}
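	/*
	 * Numeric sketch (editor's illustration): with 4 KiB pages and
	 * 8-byte pointers the table can hold 4096/8 = 512 entries, so
	 * min_spacing = curr_zone_start / 512.  The loop then picks the
	 * smallest run of consecutive zones at least that large, keeping
	 * the table within one page.
	 */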
	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;
 abort:
	return 1;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
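/*
 * Worked example (editor's illustration): with a 64 KiB chunk,
 * chunk_sectors = 128.  A bio already holding 4 sectors that starts at
 * offset 120 within its chunk gets max = (128 - (120 + 4)) << 9 = 2048,
 * i.e. at most 2 KiB more may be merged before the request would cross
 * a chunk boundary.
 */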
static int raid0_run(mddev_t *mddev)
{
	unsigned cur = 0, i = 0, nb_zone;
	s64 sectors;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
	conf = kmalloc(sizeof(raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones(mddev))
		goto out_free_conf;
	/* calculate array device size */
	mddev->array_sectors = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set)
		mddev->array_sectors += rdev->size * 2;

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
		(unsigned long long)conf->spacing);
	{
		sector_t s = mddev->array_sectors;
		sector_t space = conf->spacing;
		int round;
		conf->sector_shift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->sector_shift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
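	/*
	 * Editor's note: nb_zone is in effect ceil(array_sectors / spacing);
	 * e.g. a 1000-sector array with a spacing of 300 needs 4 hash slots.
	 */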
	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc(sizeof(struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	sectors = conf->strip_zone[cur].sectors;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i = 1; i < nb_zone; i++) {
		while (sectors <= conf->spacing) {
			cur++;
			sectors += conf->strip_zone[cur].sectors;
		}
		sectors -= conf->spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->sector_shift) {
		conf->spacing >>= conf->sector_shift;
		/* round spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->spacing++;
	}
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
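	/*
	 * Editor's illustration: 4 members with a 64 KiB chunk and 4 KiB
	 * pages give stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is
	 * raised to at least 128 pages (512 KiB), i.e. two full stripes.
	 */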
	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}
static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}
static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
	sector = bio->bi_sector;
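	/*
	 * Editor's note: a bio that would straddle a chunk boundary is split
	 * below; e.g. an 8-sector bio starting 4 sectors before a boundary
	 * becomes a 4-sector bio1 ending at the boundary and a 4-sector bio2
	 * starting the next chunk, each remapped independently.
	 */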
	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
	{
		sector_t x = sector >> conf->sector_shift;
		sector_div(x, (u32)conf->spacing);
		zone = conf->hash_table[x];
	}

	while (sector >= zone->zone_start + zone->sectors)
		zone++;
	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

	{
		sector_t x = (sector - zone->zone_start) >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;
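	/*
	 * Worked example (editor's illustration): with a 64 KiB chunk
	 * (chunk_sects = 128, chunksect_bits = 7) and a 2-device zone with
	 * zone_start = 0, sector 300 gives sect_in_chunk = 300 & 127 = 44,
	 * stripe chunk 300 >> 7 = 2, so chunk = 2 / 2 = 1 on dev[2 % 2 = 0]
	 * and rsect = (1 << 7) + dev_start + 44 = dev_start + 172.
	 */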
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev, b));

		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
				conf->strip_zone[j].zone_start,
				conf->strip_zone[j].dev_start,
				conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}
static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};
static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");