/*
 * History import (davej-history.git): "More Makefile cleanups, otherwise
 * mainly noticeable are the netfilter fix"
 * Path: drivers/block/elevator.c
 * Blob: 1200773c27ce08f415527caadc1554b278482aba
 */
/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <asm/uaccess.h>
30 * Order ascending, but only allow a request to be skipped a certain
31 * number of times
33 void elevator_linus(struct request *req, elevator_t *elevator,
34 struct list_head *real_head,
35 struct list_head *head, int orig_latency)
37 struct list_head *entry = real_head;
38 struct request *tmp;
40 req->elevator_sequence = orig_latency;
42 while ((entry = entry->prev) != head) {
43 tmp = blkdev_entry_to_request(entry);
44 if (IN_ORDER(tmp, req))
45 break;
46 if (!tmp->elevator_sequence)
47 break;
48 tmp->elevator_sequence--;
50 list_add(&req->queue, entry);
53 int elevator_linus_merge(request_queue_t *q, struct request **req,
54 struct buffer_head *bh, int rw,
55 int *max_sectors, int *max_segments)
57 struct list_head *entry, *head = &q->queue_head;
58 unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
60 entry = head;
61 if (q->head_active && !q->plugged)
62 head = head->next;
64 while ((entry = entry->prev) != head) {
65 struct request *__rq = *req = blkdev_entry_to_request(entry);
66 if (__rq->sem)
67 continue;
68 if (__rq->cmd != rw)
69 continue;
70 if (__rq->nr_sectors + count > *max_sectors)
71 continue;
72 if (__rq->rq_dev != bh->b_rdev)
73 continue;
74 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
75 ret = ELEVATOR_BACK_MERGE;
76 break;
78 if (!__rq->elevator_sequence)
79 break;
80 if (__rq->sector - count == bh->b_rsector) {
81 __rq->elevator_sequence--;
82 ret = ELEVATOR_FRONT_MERGE;
83 break;
88 * second pass scan of requests that got passed over, if any
90 if (ret != ELEVATOR_NO_MERGE && *req) {
91 while ((entry = entry->next) != &q->queue_head) {
92 struct request *tmp = blkdev_entry_to_request(entry);
93 tmp->elevator_sequence--;
97 return ret;
101 * No request sorting, just add it to the back of the list
103 void elevator_noop(struct request *req, elevator_t *elevator,
104 struct list_head *real_head, struct list_head *head,
105 int orig_latency)
107 list_add_tail(&req->queue, real_head);
111 * See if we can find a request that is buffer can be coalesced with.
113 int elevator_noop_merge(request_queue_t *q, struct request **req,
114 struct buffer_head *bh, int rw,
115 int *max_sectors, int *max_segments)
117 struct list_head *entry, *head = &q->queue_head;
118 unsigned int count = bh->b_size >> 9;
120 if (q->head_active && !q->plugged)
121 head = head->next;
123 entry = head;
124 while ((entry = entry->prev) != head) {
125 struct request *__rq = *req = blkdev_entry_to_request(entry);
126 if (__rq->sem)
127 continue;
128 if (__rq->cmd != rw)
129 continue;
130 if (__rq->nr_sectors + count > *max_sectors)
131 continue;
132 if (__rq->rq_dev != bh->b_rdev)
133 continue;
134 if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
135 return ELEVATOR_BACK_MERGE;
136 if (__rq->sector - count == bh->b_rsector)
137 return ELEVATOR_FRONT_MERGE;
139 return ELEVATOR_NO_MERGE;
/*
 * The noop "elevator" does not do any accounting
 */
void elevator_noop_dequeue(struct request *req) {}
147 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
149 blkelv_ioctl_arg_t output;
151 output.queue_ID = elevator->queue_ID;
152 output.read_latency = elevator->read_latency;
153 output.write_latency = elevator->write_latency;
154 output.max_bomb_segments = 0;
156 if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
157 return -EFAULT;
159 return 0;
162 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
164 blkelv_ioctl_arg_t input;
166 if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
167 return -EFAULT;
169 if (input.read_latency < 0)
170 return -EINVAL;
171 if (input.write_latency < 0)
172 return -EINVAL;
174 elevator->read_latency = input.read_latency;
175 elevator->write_latency = input.write_latency;
176 return 0;
179 void elevator_init(elevator_t * elevator, elevator_t type)
181 static unsigned int queue_ID;
183 *elevator = type;
184 elevator->queue_ID = queue_ID++;