drivers/md/faulty.c
/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *	reads fail "randomly" but succeed on retry
 *	writes fail "randomly" but succeed on retry
 *	reads for some address fail and then persist until a write
 *	reads for some address fail and then persist irrespective of write
 *	writes for some address fail and persist
 *	all writes fail
 *
 * Several modes can be active at a time, but only one can be set at
 * array creation.  Others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" indicate the mode.  The
 * remainder indicate a period, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persisting-faulty blocks.  When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are meant to fail,
 * we clone the bio and insert a new b_end_io into the chain.
 */
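/*
 * Illustrative example of the layout encoding: a layout of
 *	(10 << ModeShift) | ReadTransient
 * makes every 10th read fail (but succeed on retry), while a layout of
 *	ClearFaults
 * discards all currently recorded persistent faults.
 */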
#define WriteTransient	0
#define ReadTransient	1
#define WritePersistent	2
#define ReadPersistent	3
#define WriteAll	4 /* doesn't go to device */
#define ReadFixable	5
#define Modes		6

#define ClearErrors	31
#define ClearFaults	30

#define AllPersist	100 /* internal use only */
#define NoPersist	101

#define ModeMask	0x1f
#define ModeShift	5

#define MaxFault	50

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include "md.h"
#include <linux/seq_file.h>
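/*
 * End-of-I/O handler for the cloned bio: copy the clone's position back
 * to the original bio, release the clone, and complete the original
 * request with an I/O error.
 */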
static void faulty_fail(struct bio *bio, int error)
{
	struct bio *b = bio->bi_private;

	b->bi_size = bio->bi_size;
	b->bi_sector = bio->bi_sector;

	bio_put(bio);

	bio_io_error(b);
}
typedef struct faulty_conf {
	int period[Modes];
	atomic_t counters[Modes];
	sector_t faults[MaxFault];
	int modes[MaxFault];
	int nfaults;
	mdk_rdev_t *rdev;
} conf_t;
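/*
 * Decide whether the next request of the given mode should fail: the
 * per-mode counter is decremented on every request, the request fails
 * when it reaches zero, and a configured period reloads the counter so
 * the fault recurs.
 */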
static int check_mode(conf_t *conf, int mode)
{
	if (conf->period[mode] == 0 &&
	    atomic_read(&conf->counters[mode]) <= 0)
		return 0; /* no failure, no decrement */

	if (atomic_dec_and_test(&conf->counters[mode])) {
		if (conf->period[mode])
			atomic_set(&conf->counters[mode], conf->period[mode]);
		return 1;
	}
	return 0;
}
static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
{
	/* If we find a ReadFixable sector, we fix it ... */
	int i;

	for (i = 0; i < conf->nfaults; i++)
		if (conf->faults[i] >= start &&
		    conf->faults[i] < end) {
			/* found it ... */
			switch (conf->modes[i] * 2 + dir) {
			case WritePersistent*2+WRITE: return 1;
			case ReadPersistent*2+READ: return 1;
			case ReadFixable*2+READ: return 1;
			case ReadFixable*2+WRITE:
				conf->modes[i] = NoPersist;
				return 0;
			case AllPersist*2+READ:
			case AllPersist*2+WRITE: return 1;
			default:
				return 0;
			}
		}
	return 0;
}
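/*
 * Record a persistent fault at 'start'.  If the sector is already in the
 * fault table its mode is merged with the new one (escalating to
 * AllPersist when both read and write faults apply); otherwise a free
 * (NoPersist) slot is reused, up to the MaxFault limit.
 */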
static void add_sector(conf_t *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;

	for (i = 0; i < conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch (mode) {
			case NoPersist: conf->modes[i] = mode; return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;

	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n+1;
}
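/*
 * Decide whether each incoming request should fail.  Writes are checked
 * against WriteAll, WritePersistent and WriteTransient; reads against
 * ReadPersistent, ReadFixable and ReadTransient.  A request that is to
 * fail (other than WriteAll, which fails immediately) is cloned and sent
 * to the device with faulty_fail() as its completion handler, so the
 * I/O still reaches the media but completes with an error.
 */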
static int make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = (conf_t*)mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't generic_make_request,
			 * just fail immediately
			 */
			bio_endio(bio, -EIO);
			return 0;
		}

		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
				 WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_sector, WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
				 READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_sector, ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_sector, ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_clone(bio, GFP_NOIO);

		b->bi_bdev = conf->rdev->bdev;
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;

		generic_make_request(b);
		return 0;
	} else {
		bio->bi_bdev = conf->rdev->bdev;
		return 1;
	}
}
static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = (conf_t*)mddev->private;
	int n;

	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
		seq_printf(seq, " WriteTransient=%d(%d)",
			   n, conf->period[WriteTransient]);

	if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
		seq_printf(seq, " ReadTransient=%d(%d)",
			   n, conf->period[ReadTransient]);

	if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
		seq_printf(seq, " WritePersistent=%d(%d)",
			   n, conf->period[WritePersistent]);

	if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
		seq_printf(seq, " ReadPersistent=%d(%d)",
			   n, conf->period[ReadPersistent]);

	if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
		seq_printf(seq, " ReadFixable=%d(%d)",
			   n, conf->period[ReadFixable]);

	if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
		seq_printf(seq, " WriteAll");

	seq_printf(seq, " nfaults=%d", conf->nfaults);
}
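/*
 * Apply a new "layout": the low 5 bits (ModeMask) select a failure mode
 * or one of ClearErrors/ClearFaults, and the remaining bits give the
 * period.  mddev->layout is reset to -1 afterwards so that writing the
 * same value again is still treated as a change.
 */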
static int reconfig(mddev_t *mddev, int layout, int chunk_size)
{
	int mode = layout & ModeMask;
	int count = layout >> ModeShift;
	conf_t *conf = mddev->private;

	if (chunk_size != -1)
		return -EINVAL;

	/* new layout */
	if (mode == ClearFaults)
		conf->nfaults = 0;
	else if (mode == ClearErrors) {
		int i;
		for (i=0 ; i < Modes ; i++) {
			conf->period[i] = 0;
			atomic_set(&conf->counters[i], 0);
		}
	} else if (mode < Modes) {
		conf->period[mode] = count;
		if (!count)
			count++;
		atomic_set(&conf->counters[mode], count);
	} else
		return -EINVAL;
	mddev->layout = -1; /* makes sure further changes come through */
	return 0;
}
static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);

	if (sectors == 0)
		return mddev->dev_sectors;

	return sectors;
}
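/*
 * Per-array setup: clear all counters and periods, remember the backing
 * rdev (the last device on the list), publish the array size, and apply
 * the initial layout.
 */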
static int run(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int i;
	conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;

	for (i=0; i<Modes; i++) {
		atomic_set(&conf->counters[i], 0);
		conf->period[i] = 0;
	}
	conf->nfaults = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		conf->rdev = rdev;

	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
	mddev->private = conf;

	reconfig(mddev, mddev->layout, -1);

	return 0;
}
static int stop(mddev_t *mddev)
{
	conf_t *conf = (conf_t *)mddev->private;

	kfree(conf);
	mddev->private = NULL;
	return 0;
}
static struct mdk_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.reconfig	= reconfig,
	.size		= faulty_size,
};
static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");