/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <asm/uaccess.h>
25 void elevator_default(struct request
*req
, elevator_t
* elevator
,
26 struct list_head
* real_head
,
27 struct list_head
* head
, int orig_latency
)
29 struct list_head
* entry
= real_head
, * point
= NULL
;
31 int sequence
= elevator
->sequence
;
32 int latency
= orig_latency
-= elevator
->nr_segments
, pass
= 0;
33 int point_latency
= 0xbeefbeef;
35 if (list_empty(real_head
)) {
36 req
->elevator_sequence
= elevator_sequence(elevator
, orig_latency
);
37 list_add(&req
->queue
, real_head
);
41 while ((entry
= entry
->prev
) != head
) {
42 if (!point
&& latency
>= 0) {
44 point_latency
= latency
;
46 tmp
= blkdev_entry_to_request(entry
);
47 if (elevator_sequence_before(tmp
->elevator_sequence
, sequence
) || !tmp
->q
)
50 if (IN_ORDER(tmp
, req
) ||
51 (pass
&& !IN_ORDER(tmp
, blkdev_next_request(tmp
))))
54 latency
+= tmp
->nr_segments
;
60 latency
= point_latency
;
64 list_add(&req
->queue
, entry
);
65 req
->elevator_sequence
= elevator_sequence(elevator
, latency
);
68 int elevator_default_merge(request_queue_t
*q
, struct request
**req
,
69 struct buffer_head
*bh
, int rw
,
70 int *max_sectors
, int *max_segments
)
72 struct list_head
*entry
, *head
= &q
->queue_head
;
73 unsigned int count
= bh
->b_size
>> 9;
74 elevator_t
*elevator
= &q
->elevator
;
75 int orig_latency
, latency
, sequence
, action
, starving
= 0;
78 * Avoid write-bombs as not to hurt interactiveness of reads
81 *max_segments
= elevator
->max_bomb_segments
;
83 latency
= orig_latency
= elevator_request_latency(elevator
, rw
);
84 sequence
= elevator
->sequence
;
87 if (q
->head_active
&& !q
->plugged
)
90 while ((entry
= entry
->prev
) != head
&& !starving
) {
91 struct request
*__rq
= *req
= blkdev_entry_to_request(entry
);
92 latency
+= __rq
->nr_segments
;
93 if (elevator_sequence_before(__rq
->elevator_sequence
, sequence
))
101 if (__rq
->nr_sectors
+ count
> *max_sectors
)
103 if (__rq
->rq_dev
!= bh
->b_rdev
)
105 if (__rq
->sector
+ __rq
->nr_sectors
== bh
->b_rsector
) {
106 if (latency
- __rq
->nr_segments
< 0)
108 action
= ELEVATOR_BACK_MERGE
;
109 } else if (__rq
->sector
- count
== bh
->b_rsector
) {
112 action
= ELEVATOR_FRONT_MERGE
;
116 q
->elevator
.sequence
++;
119 return ELEVATOR_NO_MERGE
;
122 inline void elevator_default_dequeue(struct request
*req
)
124 if (req
->cmd
== READ
)
125 req
->e
->read_pendings
--;
127 req
->e
->nr_segments
-= req
->nr_segments
;
131 * Order ascending, but only allow a request to be skipped a certain
134 void elevator_linus(struct request
*req
, elevator_t
*elevator
,
135 struct list_head
*real_head
,
136 struct list_head
*head
, int orig_latency
)
138 struct list_head
*entry
= real_head
;
141 req
->elevator_sequence
= orig_latency
;
143 if (list_empty(real_head
)) {
144 list_add(&req
->queue
, real_head
);
148 while ((entry
= entry
->prev
) != head
) {
149 tmp
= blkdev_entry_to_request(entry
);
150 if (!tmp
->elevator_sequence
)
152 if (IN_ORDER(tmp
, req
))
154 tmp
->elevator_sequence
--;
156 list_add(&req
->queue
, entry
);
159 int elevator_linus_merge(request_queue_t
*q
, struct request
**req
,
160 struct buffer_head
*bh
, int rw
,
161 int *max_sectors
, int *max_segments
)
163 struct list_head
*entry
, *head
= &q
->queue_head
;
164 unsigned int count
= bh
->b_size
>> 9, ret
= ELEVATOR_NO_MERGE
;
167 if (q
->head_active
&& !q
->plugged
)
170 while ((entry
= entry
->prev
) != head
) {
171 struct request
*__rq
= *req
= blkdev_entry_to_request(entry
);
172 if (!__rq
->elevator_sequence
)
178 if (__rq
->nr_sectors
+ count
> *max_sectors
)
180 if (__rq
->rq_dev
!= bh
->b_rdev
)
182 if (__rq
->sector
+ __rq
->nr_sectors
== bh
->b_rsector
) {
183 ret
= ELEVATOR_BACK_MERGE
;
186 if (__rq
->sector
- count
== bh
->b_rsector
) {
187 ret
= ELEVATOR_FRONT_MERGE
;
193 * second pass scan of requests that got passed over, if any
195 if (ret
!= ELEVATOR_NO_MERGE
&& *req
) {
196 while ((entry
= entry
->next
) != &q
->queue_head
) {
197 struct request
*tmp
= blkdev_entry_to_request(entry
);
198 tmp
->elevator_sequence
--;
206 * No request sorting, just add it to the back of the list
208 void elevator_noop(struct request
*req
, elevator_t
*elevator
,
209 struct list_head
*real_head
, struct list_head
*head
,
212 list_add_tail(&req
->queue
, real_head
);
216 * See if we can find a request that is buffer can be coalesced with.
218 int elevator_noop_merge(request_queue_t
*q
, struct request
**req
,
219 struct buffer_head
*bh
, int rw
,
220 int *max_sectors
, int *max_segments
)
222 struct list_head
*entry
, *head
= &q
->queue_head
;
223 unsigned int count
= bh
->b_size
>> 9;
225 if (q
->head_active
&& !q
->plugged
)
229 while ((entry
= entry
->prev
) != head
) {
230 struct request
*__rq
= *req
= blkdev_entry_to_request(entry
);
235 if (__rq
->nr_sectors
+ count
> *max_sectors
)
237 if (__rq
->rq_dev
!= bh
->b_rdev
)
239 if (__rq
->sector
+ __rq
->nr_sectors
== bh
->b_rsector
)
240 return ELEVATOR_BACK_MERGE
;
241 if (__rq
->sector
- count
== bh
->b_rsector
)
242 return ELEVATOR_FRONT_MERGE
;
244 return ELEVATOR_NO_MERGE
;
/*
 * The noop "elevator" does not do any accounting
 */
void elevator_noop_dequeue(struct request *req) {}
#ifdef ELEVATOR_DEBUG
/*
 * Sanity check for the default elevator's accounting: recount pending
 * reads and segments from the queue, warn via printk on mismatch, and
 * repair the cached counters so the warning does not repeat forever.
 * NOTE(review): restored from extraction-damaged text (including the
 * lost #endif); verify against the upstream 2.4 tree.
 */
void elevator_default_debug(request_queue_t * q, kdev_t dev)
{
	int read_pendings = 0, nr_segments = 0;
	elevator_t * elevator = &q->elevator;
	struct list_head * entry = &q->queue_head;

	/* only the default elevator keeps this accounting */
	if (elevator->elevator_fn != elevator_default)
		return;

	while ((entry = entry->prev) != &q->queue_head) {
		struct request * req;

		req = blkdev_entry_to_request(entry);
		if (req->cmd != READ && req->cmd != WRITE && (req->q || req->nr_segments))
			printk(KERN_WARNING
			       "%s: elevator req->cmd %d req->nr_segments %u req->q %p\n",
			       kdevname(dev), req->cmd, req->nr_segments, req->q);
		if (!req->q) {
			/* dequeued requests must carry no segment accounting */
			if (req->nr_segments)
				printk(KERN_WARNING
				       "%s: elevator req->q NULL req->nr_segments %u\n",
				       kdevname(dev), req->nr_segments);
			continue;
		}
		if (req->cmd == READ)
			read_pendings++;
		nr_segments += req->nr_segments;
	}

	if (read_pendings != elevator->read_pendings) {
		printk(KERN_WARNING
		       "%s: elevator read_pendings %d should be %d\n",
		       kdevname(dev), elevator->read_pendings,
		       read_pendings);
		elevator->read_pendings = read_pendings;
	}
	if (nr_segments != elevator->nr_segments) {
		printk(KERN_WARNING
		       "%s: elevator nr_segments %d should be %d\n",
		       kdevname(dev), elevator->nr_segments,
		       nr_segments);
		elevator->nr_segments = nr_segments;
	}
}
#endif
303 int blkelvget_ioctl(elevator_t
* elevator
, blkelv_ioctl_arg_t
* arg
)
305 blkelv_ioctl_arg_t output
;
307 output
.queue_ID
= elevator
->queue_ID
;
308 output
.read_latency
= elevator
->read_latency
;
309 output
.write_latency
= elevator
->write_latency
;
310 output
.max_bomb_segments
= elevator
->max_bomb_segments
;
312 if (copy_to_user(arg
, &output
, sizeof(blkelv_ioctl_arg_t
)))
318 int blkelvset_ioctl(elevator_t
* elevator
, const blkelv_ioctl_arg_t
* arg
)
320 blkelv_ioctl_arg_t input
;
322 if (copy_from_user(&input
, arg
, sizeof(blkelv_ioctl_arg_t
)))
325 if (input
.read_latency
< 0)
327 if (input
.write_latency
< 0)
329 if (input
.max_bomb_segments
<= 0)
332 elevator
->read_latency
= input
.read_latency
;
333 elevator
->write_latency
= input
.write_latency
;
334 elevator
->max_bomb_segments
= input
.max_bomb_segments
;
339 void elevator_init(elevator_t
* elevator
, elevator_t type
)
341 static unsigned int queue_ID
;
344 elevator
->queue_ID
= queue_ID
++;