Merge with 2.4.0-test3-pre4.
[linux-2.6/linux-mips.git] / drivers / block / elevator.c
blob7720c43c4ad865b35c23a206c43cf3711fe33344
/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <asm/uaccess.h>
25 void elevator_default(struct request *req, elevator_t * elevator,
26 struct list_head * real_head,
27 struct list_head * head, int orig_latency)
29 struct list_head * entry = real_head, * point = NULL;
30 struct request * tmp;
31 int sequence = elevator->sequence;
32 int latency = orig_latency -= elevator->nr_segments, pass = 0;
33 int point_latency = 0xbeefbeef;
35 if (list_empty(real_head)) {
36 req->elevator_sequence = elevator_sequence(elevator, orig_latency);
37 list_add(&req->queue, real_head);
38 return;
41 while ((entry = entry->prev) != head) {
42 if (!point && latency >= 0) {
43 point = entry;
44 point_latency = latency;
46 tmp = blkdev_entry_to_request(entry);
47 if (elevator_sequence_before(tmp->elevator_sequence, sequence) || !tmp->q)
48 break;
49 if (latency >= 0) {
50 if (IN_ORDER(tmp, req) ||
51 (pass && !IN_ORDER(tmp, blkdev_next_request(tmp))))
52 goto link;
54 latency += tmp->nr_segments;
55 pass = 1;
58 if (point) {
59 entry = point;
60 latency = point_latency;
63 link:
64 list_add(&req->queue, entry);
65 req->elevator_sequence = elevator_sequence(elevator, latency);
68 int elevator_default_merge(request_queue_t *q, struct request **req,
69 struct buffer_head *bh, int rw,
70 int *max_sectors, int *max_segments)
72 struct list_head *entry, *head = &q->queue_head;
73 unsigned int count = bh->b_size >> 9;
74 elevator_t *elevator = &q->elevator;
75 int orig_latency, latency, sequence, action, starving = 0;
78 * Avoid write-bombs as not to hurt interactiveness of reads
80 if (rw == WRITE)
81 *max_segments = elevator->max_bomb_segments;
83 latency = orig_latency = elevator_request_latency(elevator, rw);
84 sequence = elevator->sequence;
86 entry = head;
87 if (q->head_active && !q->plugged)
88 head = head->next;
90 while ((entry = entry->prev) != head && !starving) {
91 struct request *__rq = *req = blkdev_entry_to_request(entry);
92 latency += __rq->nr_segments;
93 if (elevator_sequence_before(__rq->elevator_sequence, sequence))
94 starving = 1;
95 if (latency < 0)
96 continue;
97 if (__rq->sem)
98 continue;
99 if (__rq->cmd != rw)
100 continue;
101 if (__rq->nr_sectors + count > *max_sectors)
102 continue;
103 if (__rq->rq_dev != bh->b_rdev)
104 continue;
105 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
106 if (latency - __rq->nr_segments < 0)
107 break;
108 action = ELEVATOR_BACK_MERGE;
109 } else if (__rq->sector - count == bh->b_rsector) {
110 if (starving)
111 break;
112 action = ELEVATOR_FRONT_MERGE;
113 } else {
114 continue;
116 q->elevator.sequence++;
117 return action;
119 return ELEVATOR_NO_MERGE;
122 inline void elevator_default_dequeue(struct request *req)
124 if (req->cmd == READ)
125 req->e->read_pendings--;
127 req->e->nr_segments -= req->nr_segments;
131 * Order ascending, but only allow a request to be skipped a certain
132 * number of times
134 void elevator_linus(struct request *req, elevator_t *elevator,
135 struct list_head *real_head,
136 struct list_head *head, int orig_latency)
138 struct list_head *entry = real_head;
139 struct request *tmp;
141 req->elevator_sequence = orig_latency;
143 if (list_empty(real_head)) {
144 list_add(&req->queue, real_head);
145 return;
148 while ((entry = entry->prev) != head) {
149 tmp = blkdev_entry_to_request(entry);
150 if (!tmp->elevator_sequence)
151 break;
152 if (IN_ORDER(tmp, req))
153 break;
154 tmp->elevator_sequence--;
156 list_add(&req->queue, entry);
159 int elevator_linus_merge(request_queue_t *q, struct request **req,
160 struct buffer_head *bh, int rw,
161 int *max_sectors, int *max_segments)
163 struct list_head *entry, *head = &q->queue_head;
164 unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
166 entry = head;
167 if (q->head_active && !q->plugged)
168 head = head->next;
170 while ((entry = entry->prev) != head) {
171 struct request *__rq = *req = blkdev_entry_to_request(entry);
172 if (!__rq->elevator_sequence)
173 break;
174 if (__rq->sem)
175 continue;
176 if (__rq->cmd != rw)
177 continue;
178 if (__rq->nr_sectors + count > *max_sectors)
179 continue;
180 if (__rq->rq_dev != bh->b_rdev)
181 continue;
182 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
183 ret = ELEVATOR_BACK_MERGE;
184 break;
186 if (__rq->sector - count == bh->b_rsector) {
187 ret = ELEVATOR_FRONT_MERGE;
188 break;
193 * second pass scan of requests that got passed over, if any
195 if (ret != ELEVATOR_NO_MERGE && *req) {
196 while ((entry = entry->next) != &q->queue_head) {
197 struct request *tmp = blkdev_entry_to_request(entry);
198 tmp->elevator_sequence--;
202 return ret;
206 * No request sorting, just add it to the back of the list
208 void elevator_noop(struct request *req, elevator_t *elevator,
209 struct list_head *real_head, struct list_head *head,
210 int orig_latency)
212 list_add_tail(&req->queue, real_head);
216 * See if we can find a request that is buffer can be coalesced with.
218 int elevator_noop_merge(request_queue_t *q, struct request **req,
219 struct buffer_head *bh, int rw,
220 int *max_sectors, int *max_segments)
222 struct list_head *entry, *head = &q->queue_head;
223 unsigned int count = bh->b_size >> 9;
225 if (q->head_active && !q->plugged)
226 head = head->next;
228 entry = head;
229 while ((entry = entry->prev) != head) {
230 struct request *__rq = *req = blkdev_entry_to_request(entry);
231 if (__rq->sem)
232 continue;
233 if (__rq->cmd != rw)
234 continue;
235 if (__rq->nr_sectors + count > *max_sectors)
236 continue;
237 if (__rq->rq_dev != bh->b_rdev)
238 continue;
239 if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
240 return ELEVATOR_BACK_MERGE;
241 if (__rq->sector - count == bh->b_rsector)
242 return ELEVATOR_FRONT_MERGE;
244 return ELEVATOR_NO_MERGE;
/*
 * The noop "elevator" does not do any accounting
 */
void elevator_noop_dequeue(struct request *req) {}
#ifdef ELEVATOR_DEBUG
/*
 * Debug-only consistency check for the default elevator: walk the
 * queue, recompute read_pendings and nr_segments from scratch, warn
 * about any mismatch with the cached elevator counters, and repair
 * them.  Rate-limited to every 100th call via a static counter.
 */
void elevator_default_debug(request_queue_t * q, kdev_t dev)
{
	int read_pendings = 0, nr_segments = 0;
	elevator_t * elevator = &q->elevator;
	struct list_head * entry = &q->queue_head;
	static int counter;

	/* only the default elevator keeps this accounting */
	if (elevator->elevator_fn != elevator_default)
		return;

	/* rate-limit: check only once per 100 invocations */
	if (counter++ % 100)
		return;

	while ((entry = entry->prev) != &q->queue_head) {
		struct request * req;

		req = blkdev_entry_to_request(entry);
		if (req->cmd != READ && req->cmd != WRITE && (req->q || req->nr_segments))
			printk(KERN_WARNING
			       "%s: elevator req->cmd %d req->nr_segments %u req->q %p\n",
			       kdevname(dev), req->cmd, req->nr_segments, req->q);
		if (!req->q) {
			if (req->nr_segments)
				printk(KERN_WARNING
				       "%s: elevator req->q NULL req->nr_segments %u\n",
				       kdevname(dev), req->nr_segments);
			continue;
		}
		if (req->cmd == READ)
			read_pendings++;
		nr_segments += req->nr_segments;
	}

	if (read_pendings != elevator->read_pendings) {
		printk(KERN_WARNING
		       "%s: elevator read_pendings %d should be %d\n",
		       kdevname(dev), elevator->read_pendings,
		       read_pendings);
		elevator->read_pendings = read_pendings;
	}
	if (nr_segments != elevator->nr_segments) {
		printk(KERN_WARNING
		       "%s: elevator nr_segments %d should be %d\n",
		       kdevname(dev), elevator->nr_segments,
		       nr_segments);
		elevator->nr_segments = nr_segments;
	}
}
#endif
303 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
305 blkelv_ioctl_arg_t output;
307 output.queue_ID = elevator->queue_ID;
308 output.read_latency = elevator->read_latency;
309 output.write_latency = elevator->write_latency;
310 output.max_bomb_segments = elevator->max_bomb_segments;
312 if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
313 return -EFAULT;
315 return 0;
318 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
320 blkelv_ioctl_arg_t input;
322 if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
323 return -EFAULT;
325 if (input.read_latency < 0)
326 return -EINVAL;
327 if (input.write_latency < 0)
328 return -EINVAL;
329 if (input.max_bomb_segments <= 0)
330 return -EINVAL;
332 elevator->read_latency = input.read_latency;
333 elevator->write_latency = input.write_latency;
334 elevator->max_bomb_segments = input.max_bomb_segments;
336 return 0;
339 void elevator_init(elevator_t * elevator, elevator_t type)
341 static unsigned int queue_ID;
343 *elevator = type;
344 elevator->queue_ID = queue_ID++;