/*
 *  Anticipatory & deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 *                     Nick Piggin <nickpiggin@yahoo.com.au>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0
/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)
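/*
 * Added illustration (not part of the original file): assuming HZ=1000,
 * the tunables above work out to roughly
 *
 *	default_read_expire		HZ / 8   -> 125 ms
 *	default_write_expire		HZ / 4   -> 250 ms
 *	default_read_batch_expire	HZ / 2   -> 500 ms
 *	default_write_batch_expire	HZ / 8   -> 125 ms
 *	default_antic_expire		HZ / 150 -> ~6 ms
 *	MAX_THINKTIME			HZ / 50  -> 20 ms
 *
 * With HZ=250, HZ/150 truncates to 1 jiffy (4 ms); the "? :" fallback in
 * default_antic_expire only exists to guarantee at least one jiffy when
 * HZ/150 would otherwise be zero.
 */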
/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation) */
	ANTIC_WAIT_REQ,		/* The last read has not yet completed */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				   or timed out */
};
struct as_data {
	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct as_rq *next_arq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
	struct hlist_head *hash;	/* request hash */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	mempool_t *arq_pool;

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished; /* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};
#define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};
struct as_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	struct io_context *io_context;	/* The submitting task */

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct hlist_node hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;

	unsigned int is_sync;
	enum arq_state state;
};
#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)

static kmem_cache_t *arq_pool;

static atomic_t ioc_count = ATOMIC_INIT(0);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);
/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
}
/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->seek_samples = 0;
		atomic_inc(&ioc_count);
	}

	return ret;
}
/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}
static void as_put_io_context(struct as_rq *arq)
{
	struct as_io_context *aic;

	if (unlikely(!arq->io_context))
		return;

	aic = arq->io_context->aic;

	if (arq->is_sync == REQ_SYNC && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(arq->io_context);
}
/*
 * the back merge hash support functions
 */
static const int as_hash_shift = 6;
#define AS_HASH_BLOCK(sec)	((sec) >> 3)
#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES		(1 << as_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
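/*
 * Added note (not part of the original file): requests are hashed by their
 * *end* sector so that a bio which starts exactly where a queued request
 * ends can be found in O(1) for a back merge.  With as_hash_shift == 6 the
 * table has AS_HASH_ENTRIES == 64 chains; e.g. a request covering sectors
 * 1000..1007 has rq_hash_key() == 1008, AS_HASH_BLOCK() collapses that to
 * block 126 (1008 >> 3), and hash_long() then selects one of the 64 chains.
 */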
static inline void __as_del_arq_hash(struct as_rq *arq)
{
	hlist_del_init(&arq->hash);
}

static inline void as_del_arq_hash(struct as_rq *arq)
{
	if (!hlist_unhashed(&arq->hash))
		__as_del_arq_hash(arq);
}
static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;

	BUG_ON(!hlist_unhashed(&arq->hash));

	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}
/*
 * move hot entry to front of chain
 */
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];

	if (hlist_unhashed(&arq->hash)) {
		WARN_ON(1);
		return;
	}

	if (&arq->hash != head->first) {
		hlist_del(&arq->hash);
		hlist_add_head(&arq->hash, head);
	}
}
static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct as_rq *arq;

	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
		struct request *__rq = arq->request;

		BUG_ON(hlist_unhashed(&arq->hash));

		if (!rq_mergeable(__rq)) {
			as_del_arq_hash(arq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}
/*
 * rb tree support functions
 */
#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq)		(rq)->sector
/*
 * as_find_first_arq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;

	if (n == NULL)
		return NULL;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_arq(n);

		n = n->rb_left;
	}
}
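/*
 * Added note (not part of the original file): sort_list is keyed by
 * rq_rb_key(), the starting sector, so the leftmost rbtree node is always
 * the lowest-sector request.  If requests start at sectors 900, 100 and 400,
 * the one-way elevator serves 900 last and then "sweeps back" by restarting
 * from this leftmost entry (100).
 */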
/*
 * Add the request to the rb tree if it is unique. If there is an alias (an
 * existing request against the same sector), which can happen when using
 * direct IO, then return the alias.
 */
static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
	struct rb_node *parent = NULL;
	struct as_rq *__arq;
	struct request *rq = arq->request;

	arq->rb_key = rq_rb_key(rq);

	while (*p) {
		parent = *p;
		__arq = rb_entry_arq(parent);

		if (arq->rb_key < __arq->rb_key)
			p = &(*p)->rb_left;
		else if (arq->rb_key > __arq->rb_key)
			p = &(*p)->rb_right;
		else
			return __arq;
	}

	rb_link_node(&arq->rb_node, parent, p);
	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));

	return NULL;
}

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct as_rq *alias;

	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	if (!RB_EMPTY_NODE(&arq->rb_node)) {
		WARN_ON(1);
		return;
	}

	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
	RB_CLEAR_NODE(&arq->rb_node);
}
static struct request *
as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;
	struct as_rq *arq;

	while (n) {
		arq = rb_entry_arq(n);

		if (sector < arq->rb_key)
			n = n->rb_left;
		else if (sector > arq->rb_key)
			n = n->rb_right;
		else
			return arq->request;
	}

	return NULL;
}
/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a single request
				 */

#define BACK_PENALTY	2

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct as_rq *
as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (arq1 == NULL || arq1 == arq2)
		return arq2;
	if (arq2 == NULL)
		return arq1;

	data_dir = arq1->is_sync;

	last = ad->last_sector[data_dir];
	s1 = arq1->request->sector;
	s2 = arq2->request->sector;

	BUG_ON(data_dir != arq2->is_sync);

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return arq1;
	else if (!r2_wrap && r1_wrap)
		return arq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return arq1;
		else
			return arq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return arq1;
	else if (d2 < d1)
		return arq2;
	else {
		if (s1 >= s2)
			return arq1;
		else
			return arq2;
	}
}
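/*
 * Added illustration (not part of the original file): with BACK_PENALTY == 2
 * a request sitting 4096 sectors behind the head costs d = 4096 * 2 = 8192,
 * so a request up to 8191 sectors ahead of the head still wins over it.
 * Anything more than MAXBACK (1024*1024 sectors) behind the head is treated
 * as wrapped and is only chosen when no in-front request exists.
 */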
/*
 * as_find_next_arq finds the next request after @prev in elevator order.
 * This, together with as_choose_req, forms the basis for how the scheduler
 * chooses what request to process next. Anticipation works on top of this.
 */
static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
{
	const int data_dir = last->is_sync;
	struct as_rq *ret;
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct as_rq *arq_next, *arq_prev;

	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		arq_prev = rb_entry_arq(rbprev);
	else
		arq_prev = NULL;

	if (rbnext)
		arq_next = rb_entry_arq(rbnext);
	else {
		arq_next = as_find_first_arq(ad, data_dir);
		if (arq_next == last)
			arq_next = NULL;
	}

	ret = as_choose_req(ad, arq_next, arq_prev);

	return ret;
}
/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}
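/*
 * Added note (not part of the original file): the unsigned subtraction
 * already copes with an ordinary jiffies wrap; e.g. antic_start == ULONG_MAX
 * - 15 and jiffies == 16 still gives delta_jif == 32.  delta_jif only goes
 * negative when the raw difference exceeds LONG_MAX (a queue idle for about
 * 0x80000000 jiffies), and taking the absolute value makes such a stale
 * window count as expired instead of as an enormous time-remaining.
 */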
/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}
/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}
/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}
/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}
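/*
 * Added illustration (not part of the original file): these statistics are
 * exponentially decayed moving averages in 8.8 fixed point (1.0 == 1<<8).
 * Every update keeps 7/8 of the old value and mixes in 1/8 of the new
 * sample:
 *
 *	samples' = (7*samples + 256) / 8	(converges towards 256)
 *	total'   = (7*total + 256*ttime) / 8
 *	mean'    = (total' + 128) / samples'	(+128 rounds to nearest)
 *
 * so a steady stream of 4 ms thinktimes pulls ttime_mean towards 4 no matter
 * where it started.
 */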
static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}
/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir = arq->is_sync;
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector -
						aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos -
						rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}
/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
				struct as_rq *arq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = arq->request->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

	if (delay == 0)
		delta = 8192;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}
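/*
 * Added illustration (not part of the original file): the "close" window
 * grows exponentially with the time already spent anticipating
 * (delta = 8192 << delay, delay in milliseconds).  After 3 ms, for example,
 * delta is 65536 sectors, so a request up to 64k sectors ahead of the last
 * issued one (or up to 32k sectors behind it) still counts as close; the
 * longer we wait, the more generous the definition of "close" becomes.
 */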
/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;

	if (arq && ioc == arq->io_context) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. Is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, arq->request);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}
/*
 * as_can_anticipate indicates whether we should either run arq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, arq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}
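/*
 * Added summary (not part of the original file): the anticipation decision
 * therefore reduces to
 *
 *	no read io_context to wait on		-> dispatch (return 0)
 *	anticipation already FINISHED		-> dispatch (return 0)
 *	candidate breaks anticipation		-> dispatch (return 0)
 *	otherwise				-> keep waiting (return 1)
 *
 * where "breaks anticipation" covers: same process, window expired, the
 * process has its own IO queued or dispatched, a close request, an exited
 * task, or a mean thinktime larger than antic_expire.
 */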
/*
 * as_update_arq must be called whenever a request (arq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_arq(struct as_data *ad, struct as_rq *arq)
{
	const int data_dir = arq->is_sync;

	/* keep the next_arq cache up to date */
	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * on?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, arq))
			as_antic_stop(ad);
	}
}
/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}
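/*
 * Added illustration (not part of the original file): write_batch_count is
 * the number of writes allowed per batch, nudged so the measured batch
 * duration tracks batch_expire[REQ_ASYNC].  With the HZ=1000 defaults
 * (125 ms target): a batch that kept the disk busy for 400 ms (> 3x the
 * target) is halved, one that merely overran is decremented, one whose
 * request quota ran out early is incremented, and one that finished in under
 * a third of the target is doubled - never dropping below one request.
 */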
/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(!list_empty(&rq->queuelist));

	if (arq->state != AS_RQ_REMOVED) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == arq->io_context && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(arq);
out:
	arq->state = AS_RQ_POSTSCHED;
}
/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	const int data_dir = arq->is_sync;
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(arq->state != AS_RQ_QUEUED);

	if (arq->io_context && arq->io_context->aic) {
		BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
		atomic_dec(&arq->io_context->aic->nr_queued);
	}

	/*
	 * Update the "next_arq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_arq[data_dir] == arq)
		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	list_del_init(&arq->fifo);
	as_del_arq_hash(arq);
	as_del_arq_rb(ad, arq);
}
/*
 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct as_rq *arq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	arq = list_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, arq->expires);
}
/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}
/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	const int data_dir = arq->is_sync;

	BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_arq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &arq->io_context);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(arq->state != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
	ad->nr_dispatched++;
}
/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(request_queue_t *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_arq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_arq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		arq = ad->next_arq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, arq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (arq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		arq = ad->next_arq[ad->batch_data_dir];
		goto dispatch_request;
	}

	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		BUG_ON(arq == NULL);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
					ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * arq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, arq);

	return 1;
}
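/*
 * Added summary (not part of the original file): batches alternate between
 * REQ_SYNC (reads) and REQ_ASYNC (writes).  changed_batch is set while the
 * requests of the previous direction drain from the driver, and new_batch
 * marks a fresh read batch whose timer only starts when its first request
 * actually completes (see as_completed_request above).
 */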
/*
 * add arq to rbtree and fifo
 */
static void as_add_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir;

	arq->state = AS_RQ_NEW;

	if (rq_data_dir(arq->request) == READ
			|| (arq->request->flags & REQ_RW_SYNC))
		arq->is_sync = 1;
	else
		arq->is_sync = 0;
	data_dir = arq->is_sync;

	arq->io_context = as_get_io_context();

	if (arq->io_context) {
		as_update_iohist(ad, arq->io_context->aic, arq->request);
		atomic_inc(&arq->io_context->aic->nr_queued);
	}

	as_add_arq_rb(ad, arq);
	if (rq_mergeable(arq->request))
		as_add_arq_hash(ad, arq);

	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
	arq->expires = jiffies + ad->fifo_expire[data_dir];
	list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);

	as_update_arq(ad, arq); /* keep state machine up to date */
	arq->state = AS_RQ_QUEUED;
}
static void as_activate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_DISPATCHED);
	arq->state = AS_RQ_REMOVED;
	if (arq->io_context && arq->io_context->aic)
		atomic_dec(&arq->io_context->aic->nr_dispatched);
}

static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_REMOVED);
	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
}
/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(request_queue_t *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}
static struct request *as_former_request(request_queue_t *q,
					struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&arq->rb_node);
	struct request *ret = NULL;

	if (rbprev)
		ret = rb_entry_arq(rbprev)->request;

	return ret;
}

static struct request *as_latter_request(request_queue_t *q,
					struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&arq->rb_node);
	struct request *ret = NULL;

	if (rbnext)
		ret = rb_entry_arq(rbnext)->request;

	return ret;
}
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;
	int ret;

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = as_find_arq_hash(ad, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
	if (__rq) {
		BUG_ON(rb_key != rq_rb_key(__rq));

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_FRONT_MERGE;
			goto out;
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	if (ret) {
		if (rq_mergeable(__rq))
			as_hot_arq_hash(ad, RQ_DATA(__rq));
	}
	*req = __rq;
	return ret;
}
static void as_merged_request(request_queue_t *q, struct request *req)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != arq->rb_key) {
		as_del_arq_rb(ad, arq);
		as_add_arq_rb(ad, arq);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}
}
static void as_merged_requests(request_queue_t *q, struct request *req,
				struct request *next)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);
	struct as_rq *anext = RQ_DATA(next);

	/*
	 * reposition arq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	if (rq_rb_key(req) != arq->rb_key) {
		as_del_arq_rb(ad, arq);
		as_add_arq_rb(ad, arq);
	}

	/*
	 * if anext expires before arq, assign its expire time to arq
	 * and move into anext position (anext will be deleted) in fifo
	 */
	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
		if (time_before(anext->expires, arq->expires)) {
			list_move(&arq->fifo, &anext->fifo);
			arq->expires = anext->expires;
			/*
			 * Don't copy here but swap, because when anext is
			 * removed below, it must contain the unused context
			 */
			swap_io_context(&arq->io_context, &anext->io_context);
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(anext);

	anext->state = AS_RQ_MERGED;
}
/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(void *data)
{
	struct request_queue *q = data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!as_queue_empty(q))
		q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void as_put_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (!arq) {
		WARN_ON(1);
		return;
	}

	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
		     arq->state != AS_RQ_PRESCHED &&
		     arq->state != AS_RQ_MERGED)) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
	}

	mempool_free(arq, ad->arq_pool);
	rq->elevator_private = NULL;
}
static int as_set_request(request_queue_t *q, struct request *rq,
			  struct bio *bio, gfp_t gfp_mask)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);

	if (arq) {
		memset(arq, 0, sizeof(*arq));
		RB_CLEAR_NODE(&arq->rb_node);
		arq->request = rq;
		arq->state = AS_RQ_PRESCHED;
		arq->io_context = NULL;
		INIT_HLIST_NODE(&arq->hash);
		INIT_LIST_HEAD(&arq->fifo);
		rq->elevator_private = arq;
		return 0;
	}

	return 1;
}
static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	if (ad->antic_status == ANTIC_WAIT_REQ ||
			ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context();
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}
static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush();

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	mempool_destroy(ad->arq_pool);
	put_io_context(ad->io_context);
	kfree(ad->hash);
	kfree(ad);
}
/*
 * initialize elevator private data (as_data), and alloc an arq for
 * each request on the free lists
 */
static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
	struct as_data *ad;
	int i;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return NULL;
	memset(ad, 0, sizeof(*ad));

	ad->q = q; /* Identify what queue the data belongs to */

	ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!ad->hash) {
		kfree(ad);
		return NULL;
	}

	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, arq_pool, q->node);
	if (!ad->arq_pool) {
		kfree(ad->hash);
		kfree(ad);
		return NULL;
	}

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler, q);

	for (i = 0; i < AS_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&ad->hash[i]);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return ad;
}
static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
static ssize_t est_time_show(elevator_t *e, char *page)
{
	struct as_data *ad = e->elevator_data;
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
				"cooperating process submitting IO\n",
				100*ad->exit_no_coop/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}
#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(elevator_t *e, char *page)		\
{								\
	struct as_data *ad = e->elevator_data;			\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct as_data *ad = e->elevator_data;				\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION
#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};
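/*
 * Added usage note (not part of the original file): each entry above becomes
 * a file under the queue's iosched directory, with the show/store helpers
 * converting between milliseconds (user visible) and jiffies (internal).
 * Assuming a disk named sda running this scheduler, a typical session is:
 *
 *	# cat /sys/block/sda/queue/iosched/antic_expire
 *	6
 *	# echo 10 > /sys/block/sda/queue/iosched/antic_expire
 *
 * Values are interpreted as milliseconds and clamped to [0, INT_MAX] before
 * conversion.
 */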
static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn =		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn =	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	as_former_request,
		.elevator_latter_req_fn =	as_latter_request,
		.elevator_set_req_fn =		as_set_request,
		.elevator_put_req_fn =		as_put_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
		.trim =				as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};
static int __init as_init(void)
{
	int ret;

	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
				     0, 0, NULL, NULL);
	if (!arq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_as);
	if (!ret) {
		/*
		 * don't allow AS to get unregistered, since we would have
		 * to browse all tasks in the system and release their
		 * as_io_context first
		 */
		__module_get(THIS_MODULE);
		return 0;
	}

	kmem_cache_destroy(arq_pool);
	return ret;
}

static void __exit as_exit(void)
{
	DECLARE_COMPLETION(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);
	kmem_cache_destroy(arq_pool);
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");
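/*
 * Added usage note (not part of the original file): once this scheduler is
 * built in or loaded, it can be selected per device at runtime (assuming a
 * disk named sda), with the active scheduler shown in brackets:
 *
 *	# echo anticipatory > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [anticipatory] deadline cfq
 *
 * or system wide at boot with elevator=anticipatory on the kernel command
 * line; "anticipatory" here matches the .elevator_name registered above.
 */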