/*
 * Provenance: Linux 2.4.0-test6-pre9 merge, linux-mips.git
 * drivers/block/elevator.c
 * (blob 200fb446a5b8a47994fc3c0dbe0b3eaf470cfdb1)
 */
/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 *  30042000 Jens Axboe <axboe@suse.de> :
 *
 *  Split the elevator a bit so that it is possible to choose a different
 *  one or even write a new "plug in". There are three pieces:
 *  - elevator_fn, inserts a new request in the queue list
 *  - elevator_merge_fn, decides whether a new buffer can be merged with
 *    an existing request
 *  - elevator_dequeue_fn, called when a request is taken off the active list
 */
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/elevator.h>
22 #include <linux/blk.h>
23 #include <asm/uaccess.h>
26 * Order ascending, but only allow a request to be skipped a certain
27 * number of times
29 void elevator_linus(struct request *req, elevator_t *elevator,
30 struct list_head *real_head,
31 struct list_head *head, int orig_latency)
33 struct list_head *entry = real_head;
34 struct request *tmp;
36 req->elevator_sequence = orig_latency;
38 while ((entry = entry->prev) != head) {
39 tmp = blkdev_entry_to_request(entry);
40 if (IN_ORDER(tmp, req))
41 break;
42 if (!tmp->elevator_sequence)
43 break;
44 tmp->elevator_sequence--;
46 list_add(&req->queue, entry);
49 int elevator_linus_merge(request_queue_t *q, struct request **req,
50 struct buffer_head *bh, int rw,
51 int *max_sectors, int *max_segments)
53 struct list_head *entry, *head = &q->queue_head;
54 unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
56 entry = head;
57 if (q->head_active && !q->plugged)
58 head = head->next;
60 while ((entry = entry->prev) != head) {
61 struct request *__rq = *req = blkdev_entry_to_request(entry);
62 if (__rq->sem)
63 continue;
64 if (__rq->cmd != rw)
65 continue;
66 if (__rq->nr_sectors + count > *max_sectors)
67 continue;
68 if (__rq->rq_dev != bh->b_rdev)
69 continue;
70 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
71 ret = ELEVATOR_BACK_MERGE;
72 break;
74 if (!__rq->elevator_sequence)
75 break;
76 if (__rq->sector - count == bh->b_rsector) {
77 __rq->elevator_sequence--;
78 ret = ELEVATOR_FRONT_MERGE;
79 break;
84 * second pass scan of requests that got passed over, if any
86 if (ret != ELEVATOR_NO_MERGE && *req) {
87 while ((entry = entry->next) != &q->queue_head) {
88 struct request *tmp = blkdev_entry_to_request(entry);
89 tmp->elevator_sequence--;
93 return ret;
97 * No request sorting, just add it to the back of the list
99 void elevator_noop(struct request *req, elevator_t *elevator,
100 struct list_head *real_head, struct list_head *head,
101 int orig_latency)
103 list_add_tail(&req->queue, real_head);
107 * See if we can find a request that is buffer can be coalesced with.
109 int elevator_noop_merge(request_queue_t *q, struct request **req,
110 struct buffer_head *bh, int rw,
111 int *max_sectors, int *max_segments)
113 struct list_head *entry, *head = &q->queue_head;
114 unsigned int count = bh->b_size >> 9;
116 if (q->head_active && !q->plugged)
117 head = head->next;
119 entry = head;
120 while ((entry = entry->prev) != head) {
121 struct request *__rq = *req = blkdev_entry_to_request(entry);
122 if (__rq->sem)
123 continue;
124 if (__rq->cmd != rw)
125 continue;
126 if (__rq->nr_sectors + count > *max_sectors)
127 continue;
128 if (__rq->rq_dev != bh->b_rdev)
129 continue;
130 if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
131 return ELEVATOR_BACK_MERGE;
132 if (__rq->sector - count == bh->b_rsector)
133 return ELEVATOR_FRONT_MERGE;
135 return ELEVATOR_NO_MERGE;
/*
 * The noop "elevator" does not do any accounting, so taking a request
 * off the active list requires no work.
 */
void elevator_noop_dequeue(struct request *req)
{
}
143 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
145 blkelv_ioctl_arg_t output;
147 output.queue_ID = elevator->queue_ID;
148 output.read_latency = elevator->read_latency;
149 output.write_latency = elevator->write_latency;
150 output.max_bomb_segments = elevator->max_bomb_segments;
152 if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
153 return -EFAULT;
155 return 0;
158 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
160 blkelv_ioctl_arg_t input;
162 if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
163 return -EFAULT;
165 if (input.read_latency < 0)
166 return -EINVAL;
167 if (input.write_latency < 0)
168 return -EINVAL;
169 if (input.max_bomb_segments <= 0)
170 return -EINVAL;
172 elevator->read_latency = input.read_latency;
173 elevator->write_latency = input.write_latency;
174 elevator->max_bomb_segments = input.max_bomb_segments;
176 return 0;
179 void elevator_init(elevator_t * elevator, elevator_t type)
181 static unsigned int queue_ID;
183 *elevator = type;
184 elevator->queue_ID = queue_ID++;