/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
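
/*
 * Worked example (for illustration only): a request starting at sector
 * 2048 that spans 8 sectors has rq_hash_key(rq) == 2056, i.e. the sector
 * at which a back-mergeable bio would have to start.  Its bucket is
 * ELV_HASH_FN(2056) == hash_long(2056 >> 3, elv_hash_shift), one of the
 * ELV_HASH_ENTRIES == 64 hash heads.
 */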

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
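
/*
 * Example (for illustration only): if __rq covers sectors [2048, 2056)
 * and a bio starts at sector 2056, the bio extends the request at its
 * tail and we report ELEVATOR_BACK_MERGE.  If the bio instead covers
 * [2040, 2048), it ends exactly where the request begins and we report
 * ELEVATOR_FRONT_MERGE.  Anything else is ELEVATOR_NO_MERGE.
 */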

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static int elevator_init_queue(struct request_queue *q,
			       struct elevator_queue *eq)
{
	eq->elevator_data = eq->ops->elevator_init_fn(q);
	if (eq->elevator_data)
		return 0;
	return -ENOMEM;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
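
/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * stores "deadline" in chosen_elevator; elevator_init() below falls back
 * to it when a queue is initialized without an explicit scheduler name.
 */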

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
							"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	err = elevator_init_queue(q, eq);
	if (err) {
		kobject_put(&eq->kobj);
		return err;
	}

	q->elevator = eq;
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
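
/*
 * Usage sketch (illustration only, not part of this file): an io
 * scheduler typically keeps one sort tree per data direction and drives
 * it with these helpers from its callbacks, e.g.:
 *
 *	elv_rb_add(&sort_list[rq_data_dir(rq)], rq);	// on add_req
 *	elv_rb_del(&sort_list[rq_data_dir(rq)], rq);	// on dispatch
 *	rq = elv_rb_find(&sort_list[READ], sector);	// merge lookup
 *
 * "sort_list" is a hypothetical per-scheduler array of struct rb_root.
 */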

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort inserted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
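
/*
 * Worked example (for illustration only): with q->end_sector == 1000,
 * requests at or beyond sector 1000 sort among themselves ahead of the
 * boundary, while requests below 1000 (sectors the head has already
 * passed) sort in a second run behind them.  Given a dispatch list of
 * positions {1000, 1500, 200}, a new rq at sector 1200 scans back past
 * 200 and 1500 and is linked after 1000, yielding {1000, 1200, 1500, 200}.
 */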

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_drain_queue(q, false);
}

void elv_quiesce_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private[0] = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
{
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}

int elv_register_queue(struct request_queue *q)
{
	return __elv_register_queue(q, q->elevator);
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
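
/*
 * Registration sketch (illustration only, not part of this file): a
 * minimal scheduler module of this era looks roughly like the stock
 * noop scheduler.  The "example_*" names are hypothetical; the field
 * and callback names match struct elevator_type as used in this file.
 *
 *	static struct elevator_type elevator_example = {
 *		.ops = {
 *			.elevator_merge_req_fn	= example_merged_requests,
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *			.elevator_init_fn	= example_init_queue,
 *			.elevator_exit_fn	= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		elv_register(&elevator_example);
 *		return 0;
 *	}
 *	module_init(example_init);
 */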

void elv_unregister(struct elevator_type *e)
{
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	int err;

	/* allocate new elevator */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	err = elevator_init_queue(q, e);
	if (err) {
		kobject_put(&e->kobj);
		return err;
	}

	/* turn on BYPASS and drain all requests w/ elevator private data */
	elv_quiesce_start(q);

	/* unregister old queue, register new one and kill old elevator */
	if (q->elevator->registered) {
		elv_unregister_queue(q);
		err = __elv_register_queue(q, e);
		if (err)
			goto fail_register;
	}

	/* done, replace the old one with new one and turn off BYPASS */
	spin_lock_irq(q->queue_lock);
	old_elevator = q->elevator;
	q->elevator = e;
	spin_unlock_irq(q->queue_lock);

	elevator_exit(old_elevator);
	elv_quiesce_end(q);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	elv_register_queue(q);
	elv_quiesce_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
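
/*
 * Typical use from userspace (for illustration): the active scheduler is
 * switched per-queue via sysfs, which lands in elv_iosched_store() and
 * elv_iosched_show() below, e.g.:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */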

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);