/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
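
/*
 * Merge hash stuff.
 */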
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
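
/*
 * Look up a registered elevator type by name. Caller must hold
 * elv_list_lock.
 */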
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
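
/*
 * Find an elevator type by name, loading the "<name>-iosched" module on
 * demand if it is not yet registered. On success a reference to the
 * owning module is held; drop it with elevator_put().
 */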
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		snprintf(elv, sizeof(elv), "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;
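
/*
 * Allocate an elevator_queue and its merge hash table. Returns NULL (and
 * drops the elevator type reference) if any allocation fails.
 */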
static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	void *data;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
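
/*
 * Request hash: mergeable requests are indexed by the sector that
 * immediately follows them (rq_hash_key), so elv_merge() can find a
 * back merge candidate for a bio with a single lookup.
 */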
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
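
/*
 * Called when rq has been extended by merging a bio into it. A back
 * merge moves the request's end sector, so it must be repositioned in
 * the merge hash.
 */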
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
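
/*
 * Force the elevator to dispatch all requests it is still holding back.
 */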
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
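
/*
 * Core insertion: place rq on the dispatch queue, or hand it to the
 * elevator, according to 'where'. Queue lock must be held on entry.
 */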
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- queue_in_flight(q);

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (rq->cmd_flags & REQ_HARDBARRIER)
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!queue_in_flight(q) &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			__blk_run_queue(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return count;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
*elv_rb_former_request(struct request_queue
*q
,
1132 struct rb_node
*rbprev
= rb_prev(&rq
->rb_node
);
1135 return rb_entry_rq(rbprev
);
1139 EXPORT_SYMBOL(elv_rb_former_request
);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);