/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

struct stripe_c {
	uint32_t stripes;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	/* stripe chunk size */
	uint32_t chunk_shift;
	sector_t chunk_mask;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct kstriped_ws;

	struct stripe stripe[0];
};

static struct workqueue_struct *kstriped;

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);

	dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}

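/*
 * Note: the dm_array_too_big() check above guards the kmalloc() size
 * computation against overflow for absurdly large stripe counts; if the
 * flexible stripe[] array would not fit, the allocation is refused by
 * returning NULL.
 */
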
/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;

	if (sscanf(argv[1], "%llu", &start) != 1)
		return -EINVAL;

	if (dm_get_device(ti, argv[0], start, sc->stripe_width,
			  dm_table_get_mode(ti->table),
			  &sc->stripe[stripe].dev))
		return -ENXIO;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size (2^n)> [<dev_path> <offset>]+
 */
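/*
 * For illustration only (device names and sizes below are hypothetical):
 * a two-way stripe with 128-sector (64KiB) chunks over two 1GiB devices
 * could be loaded with a table line such as
 *
 *   0 4194304 striped 2 128 /dev/sdb1 0 /dev/sdc1 0
 *
 * i.e. <number of stripes> = 2, <chunk size> = 128 sectors, followed by
 * one <dev_path> <offset> pair per stripe.
 */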
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width;
	uint32_t stripes;
	uint32_t chunk_size;
	char *end;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	stripes = simple_strtoul(argv[0], &end, 10);
	if (!stripes || *end) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	chunk_size = simple_strtoul(argv[1], &end, 10);
	if (*end) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	/*
	 * chunk_size is a power of two
	 */
	if (!is_power_of_2(chunk_size) ||
	    (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
		ti->error = "Invalid chunk size";
		return -EINVAL;
	}

	if (ti->len & (chunk_size - 1)) {
		ti->error = "Target length not divisible by "
		    "chunk size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by "
		    "number of stripes";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes ?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations "
			"specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context "
		    "failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->kstriped_ws, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;

	sc->stripes = stripes;
	sc->stripe_width = width;
	ti->split_io = chunk_size;
	ti->num_flush_requests = stripes;

	sc->chunk_mask = ((sector_t) chunk_size) - 1;
	for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
		chunk_size >>= 1;
	sc->chunk_shift--;
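
	/*
	 * The loop above just computes ilog2() of the (power-of-two) chunk
	 * size: for example, a chunk_size of 128 sectors yields
	 * chunk_shift = 7 and chunk_mask = 127, letting stripe_map() use
	 * shifts and masks instead of divisions.
	 */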

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_workqueue(kstriped);
	kfree(sc);
}

static int stripe_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	sector_t offset, chunk;
	uint32_t stripe;

	if (unlikely(bio_empty_barrier(bio))) {
		BUG_ON(map_context->flush_request >= sc->stripes);
		bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	offset = bio->bi_sector - ti->begin;
	chunk = offset >> sc->chunk_shift;
	stripe = sector_div(chunk, sc->stripes);

	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
	bio->bi_sector = sc->stripe[stripe].physical_start +
	    (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
	return DM_MAPIO_REMAPPED;
}

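/*
 * Worked example of the remapping arithmetic above (numbers chosen purely
 * for illustration): with 2 stripes and 128-sector chunks (chunk_shift = 7,
 * chunk_mask = 127), an I/O at relative sector 300 gives chunk = 2;
 * sector_div() then leaves chunk = 1 and selects stripe 0, so the bio is
 * remapped to stripe 0 at physical_start + (1 << 7) + (300 & 127)
 * = physical_start + 172.
 */
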
/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 */
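/*
 * For illustration only (device numbers are hypothetical), a 2-stripe
 * target with 128-sector chunks and no errors might report:
 *
 *   INFO:  "2 254:1 254:2 1 AA"
 *   TABLE: "2 128 254:1 0 254:2 0"
 */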
static int stripe_status(struct dm_target *ti,
			 status_type_t type, char *result, unsigned int maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	char buffer[sc->stripes + 1];
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
				'D' : 'A';
		}
		buffer[i] = '\0';
		DMEMIT("1 %s", buffer);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
		       (unsigned long long)sc->chunk_mask + 1);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			       (unsigned long long)sc->stripe[i].physical_start);
		break;
	}
	return 0;
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!error)
		return 0; /* I/O complete */

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		return error;

	if (error == -EOPNOTSUPP)
		return error;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d",
		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
		MINOR(disk_devt(bio->bi_bdev->bd_disk)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++)
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				queue_work(kstriped, &sc->kstriped_ws);
		}

	return error;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = (sc->chunk_mask + 1) << 9;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

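/*
 * Illustrative numbers only: with 128-sector (64KiB) chunks and 2 stripes,
 * the hints above advertise a minimum I/O size of 65536 bytes (one chunk)
 * and an optimal I/O size of 131072 bytes (one full stripe across all
 * devices).
 */
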
static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 3, 0},
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
};

int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0) {
		DMWARN("target registration failed");
		return r;
	}

	kstriped = create_singlethread_workqueue("kstriped");
	if (!kstriped) {
		DMERR("failed to create workqueue kstriped");
		dm_unregister_target(&stripe_target);
		return -ENOMEM;
	}

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
	destroy_workqueue(kstriped);
}