/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

DEFINE_TRACE(block_rq_abort);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);
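
/*
 * Illustrative note (not from the original source): rq_hash_key() is the
 * sector immediately after a request, so a bio whose bi_sector equals that
 * key is a back-merge candidate.  For example, a request starting at sector
 * 1024 with nr_sectors == 8 hashes under key 1032; ELV_HASH_BLOCK() then
 * groups keys in runs of 8 sectors (1032 >> 3 == 129) before hash_long()
 * picks one of the ELV_HASH_ENTRIES (64) buckets.
 */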
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}
/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
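
/*
 * Illustrative example (not from the original source): if __rq covers
 * sectors 100..107 (sector == 100, nr_sectors == 8), a bio starting at
 * sector 108 satisfies the first test and is a back merge, while an
 * 8-sector bio starting at sector 92 satisfies the second test
 * (100 - 8 == 92) and is a front merge.
 */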
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
static void *elevator_init_queue(struct request_queue *q,
				  struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}
static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
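
/*
 * Usage note (illustrative, not part of the original source): the default
 * scheduler can be chosen at boot with a command line such as
 *
 *	elevator=deadline
 *
 * or switched per queue at runtime through sysfs, e.g.
 *
 *	echo cfq > /sys/block/sda/queue/scheduler
 *
 * where "sda" is only a placeholder device name.
 */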
static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
							"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);
void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
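
/*
 * Illustrative sketch (not part of this file): an io scheduler typically
 * keeps one sector-sorted tree per data direction and drives the helpers
 * above from its add/remove hooks, roughly like the hypothetical "foo"
 * scheduler below.  "struct foo_data" and its sort_list[] are assumptions
 * made up for the example.
 *
 *	static void foo_add_request(struct request_queue *q, struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_add(&fd->sort_list[rq_data_dir(rq)], rq);
 *	}
 *
 *	static void foo_remove_request(struct request_queue *q, struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_del(&fd->sort_list[rq_data_dir(rq)], rq);
 *	}
 */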
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
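
/*
 * Illustrative sketch (not part of this file): a minimal dispatch hook of a
 * hypothetical "foo" scheduler that feeds one queued request at a time into
 * the dispatch list in sorted order.  "struct foo_data" and its "queue" list
 * are assumptions made up for the example.
 *
 *	static int foo_dispatch(struct request_queue *q, int force)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *		struct request *rq;
 *
 *		if (list_empty(&fd->queue))
 *			return 0;
 *
 *		rq = list_entry(fd->queue.next, struct request, queuelist);
 *		list_del_init(&rq->queuelist);
 *		elv_dispatch_sort(q, rq);
 *		return 1;
 *	}
 */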
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quisce_start(struct request_queue *q)
{
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		blk_start_queueing(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quisce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_start_queueing(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && rq->data_len) {
			/*
			 * make sure space for the drain appears we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && rq->data_len &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and to it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}
int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}
void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}
void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
	}
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!q->in_flight &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			blk_start_queueing(q);
		}
	}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}
static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
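
/*
 * Illustrative sketch (not part of this file): a new io scheduler module
 * registers itself by filling in a struct elevator_type and calling
 * elv_register() from its init hook, along the lines of the hypothetical
 * "foo" scheduler below.  The foo_* callbacks are assumptions made up for
 * the example.
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops = {
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_queue_empty_fn = foo_queue_empty,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		elv_register(&elevator_foo);
 *		return 0;
 *	}
 *	module_init(foo_init);
 */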
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quisce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quisce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);