/*
 * Anticipatory & deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 *                    Nick Piggin <nickpiggin@yahoo.com.au>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>
/*
 * See Documentation/block/as-iosched.txt
 */
/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)
/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)
/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)
/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)
/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
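/*
 * Editor's note (worked example, not in the original source): with HZ == 1000
 * this is 1000/150 == 6 jiffies, i.e. ~6ms; with HZ == 100 the division
 * truncates to 0, so the ?: fallback enforces a minimum of 1 jiffy (10ms).
 */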
/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)
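/*
 * Editor's note: HZ/50UL is simply 20ms expressed in jiffies, e.g. 20 jiffies
 * at HZ == 1000, or 5 jiffies at HZ == 250.
 */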
/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};
enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation) */
	ANTIC_WAIT_REQ,		/* The last read has not yet completed */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};
struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct request *next_rq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished; /* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
	AS_RQ_MERGED,		/* merged into another request */
};
#define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
#define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
static void as_antic_stop(struct as_data *ad);
/*
 * IO Context helper functions
 */
/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	elv_ioc_count_dec(ioc_count);
	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}
static void as_trim(struct io_context *ioc)
{
	spin_lock_irq(&ioc->lock);
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
	spin_unlock_irq(&ioc->lock);
}
/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}
static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		elv_ioc_count_inc(ioc_count);
	}

	return ret;
}
/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(int node)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}
static void as_put_io_context(struct request *rq)
{
	struct as_io_context *aic;

	if (unlikely(!RQ_IOC(rq)))
		return;

	aic = RQ_IOC(rq)->aic;

	if (rq_is_sync(rq) && aic) {
		unsigned long flags;

		spin_lock_irqsave(&aic->lock, flags);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock_irqrestore(&aic->lock, flags);
	}

	put_io_context(RQ_IOC(rq));
}
/*
 * rb tree support functions
 */
#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])
static void as_add_rq_rb(struct as_data *ad, struct request *rq)
{
	struct request *alias;

	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}
static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
{
	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
}
/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request
				 */

#define BACK_PENALTY	2
/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct request *
as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	data_dir = rq_is_sync(rq1);

	last = ad->last_sector[data_dir];
	s1 = rq1->sector;
	s2 = rq2->sector;

	BUG_ON(data_dir != rq_is_sync(rq2));

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return rq1;
	else if (!r2_wrap && r1_wrap)
		return rq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return rq1;
	else if (d2 < d1)
		return rq2;
	else {
		if (s1 >= s2)
			return rq1;
		else
			return rq2;
	}
}
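/*
 * Editor's note (illustrative example, not in the original source): with the
 * head at sector 1000, a request at sector 980 lies 20 sectors behind and
 * costs d = (1000 - 980) * BACK_PENALTY = 40, while a request at sector 1030
 * costs d = 30; the forward request wins despite being farther away.
 */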
/*
 * as_find_next_rq finds the next request after @prev in elevator order.
 * this with as_choose_req form the basis for how the scheduler chooses
 * what request to process next. Anticipation works on top of this.
 */
static struct request *
as_find_next_rq(struct as_data *ad, struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		const int data_dir = rq_is_sync(last);

		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return as_choose_req(ad, next, prev);
}
/*
 * anticipatory scheduling functions follow
 */
/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}
/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}
/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}
/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}
/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic;
		spin_lock(&ad->io_context->lock);
		aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out*/
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
		spin_unlock(&ad->io_context->lock);
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}
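/*
 * Editor's note (worked example, values illustrative): in this 8.8 fixed
 * point scheme, starting from ttime_samples == 0 successive samples give
 * (7*0 + 256)/8 = 32, then (7*32 + 256)/8 = 60, then 84, ... converging on
 * 256 == 1.0; the "+ 128" when computing ttime_mean rounds to nearest.
 */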
static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}
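/*
 * Editor's note: do_div() divides the u64 total in place because a plain
 * 64-bit "/" is not available on every 32-bit architecture; adding
 * seek_samples/2 beforehand rounds the resulting mean to nearest.
 */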
/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	int data_dir = rq_is_sync(rq);
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector -
							aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos -
							rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}
/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct request *rq)
{
	unsigned long delay;	/* jiffies */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = rq->sector;
	sector_t delta;	/* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = jiffies - ad->antic_start;

	if (delay == 0)
		delta = 8192;
	else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}
/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);
	spin_lock(&ioc->lock);

	if (rq && ioc == RQ_IOC(rq)) {
		/* request from same process */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		spin_unlock(&ioc->lock);
		return 1;
	}

	aic = ioc->aic;
	if (!aic) {
		spin_unlock(&ioc->lock);
		return 0;
	}

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. Is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, rq);
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128) {
			spin_unlock(&ioc->lock);
			return 1;
		}
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire) {
			spin_unlock(&ioc->lock);
			return 1;
		}
		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
			spin_unlock(&ioc->lock);
			return 1;
		}
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		spin_unlock(&ioc->lock);
		return 1;
	}
	spin_unlock(&ioc->lock);
	return 0;
}
/*
 * as_can_anticipate indicates whether we should either run rq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct request *rq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, rq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}
/*
 * as_update_rq must be called whenever a request (rq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_rq(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	/* keep the next_rq cache up to date */
	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * on?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, rq))
			as_antic_stop(ad);
	}
}
/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}
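/*
 * Editor's note (worked example): if the measured write_time overruns the
 * batch_expire target by more than 3x, write_batch_count is halved; a mere
 * overrun just decrements it. Draining the whole count in under a third of
 * the target doubles it, so the count settles near the number of writes the
 * device can retire in one batch_expire interval.
 */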
/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(!list_empty(&rq->queuelist));

	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
		printk("rq->state %d\n", RQ_STATE(rq));
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(rq);
out:
	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
}
/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(struct request_queue *q,
				     struct request *rq)
{
	const int data_dir = rq_is_sync(rq);
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	ioc = RQ_IOC(rq);
	if (ioc && ioc->aic) {
		BUG_ON(!atomic_read(&ioc->aic->nr_queued));
		atomic_dec(&ioc->aic->nr_queued);
	}

	/*
	 * Update the "next_rq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_rq[data_dir] == rq)
		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	rq_fifo_clear(rq);
	as_del_rq_rb(ad, rq);
}
/*
 * as_fifo_expired returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct request *rq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	rq = rq_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, rq_fifo_time(rq));
}
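/*
 * Editor's note: only the request at the head of the FIFO needs checking;
 * requests are appended with the same fifo_expire offset, so if the head has
 * not expired, nothing behind it can have expired either.
 */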
/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}
/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_rq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		struct io_context *ioc = RQ_IOC(rq);
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &ioc);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
	ad->nr_dispatched++;
}
/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(struct request_queue *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
	struct request *rq;

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_rq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_rq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		rq = ad->next_rq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, rq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (rq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
		ad->last_check_fifo[REQ_ASYNC] = jiffies;
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
					ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * rq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, rq);

	return 1;
}
/*
 * add rq to rbtree and fifo
 */
static void as_add_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	int data_dir;

	RQ_SET_STATE(rq, AS_RQ_NEW);

	data_dir = rq_is_sync(rq);

	rq->elevator_private = as_get_io_context(q->node);

	if (RQ_IOC(rq)) {
		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
	}

	as_add_rq_rb(ad, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);

	as_update_rq(ad, rq); /* keep state machine up to date */
	RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
static void as_activate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
	RQ_SET_STATE(rq, AS_RQ_REMOVED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}
static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
}
/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(struct request_queue *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}
static int
as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;

	/*
	 * check for front merge
	 */
	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}
static void as_merged_request(struct request_queue *q, struct request *req,
			      int type)
{
	struct as_data *ad = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		as_del_rq_rb(ad, req);
		as_add_rq_rb(ad, req);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}
}
static void as_merged_requests(struct request_queue *q, struct request *req,
				struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to arq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(next);

	RQ_SET_STATE(next, AS_RQ_MERGED);
}
/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(struct work_struct *work)
{
	struct as_data *ad = container_of(work, struct as_data, antic_work);
	struct request_queue *q = ad->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static int as_may_queue(struct request_queue *q, int rw)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	if (ad->antic_status == ANTIC_WAIT_REQ ||
			ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context(q->node);
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}
static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush_work(&ad->antic_work);

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	put_io_context(ad->io_context);
	kfree(ad);
}
/*
 * initialize elevator private data (as_data).
 */
static void *as_init_queue(struct request_queue *q)
{
	struct as_data *ad;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!ad)
		return NULL;

	ad->q = q; /* Identify what queue the data belongs to */

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return ad;
}
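/*
 * Editor's note (worked example): at HZ == 1000 the initial write_batch_count
 * is (HZ/8)/10 == 12 requests; at HZ == 100 it would compute to 1, which the
 * clamp above raises to the minimum of 2.
 */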
/*
 * sysfs parts below
 */

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t est_time_show(elevator_t *e, char *page)
{
	struct as_data *ad = e->elevator_data;
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
					"cooperating process submitting IO\n",
					100*ad->exit_no_coop/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}
#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(elevator_t *e, char *page)		\
{								\
	struct as_data *ad = e->elevator_data;			\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION
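/*
 * Editor's note: each SHOW_FUNCTION() invocation above expands into a full
 * sysfs show method, e.g. as_antic_expire_show() reports antic_expire
 * converted from jiffies to milliseconds via
 * /sys/block/<dev>/queue/iosched/antic_expire.
 */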
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct as_data *ad = e->elevator_data;				\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION
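/*
 * Editor's note: the store path clamps the raw millisecond value to
 * [MIN, MAX] before converting it, so "echo 10 > .../iosched/antic_expire"
 * ends up storing msecs_to_jiffies(10) in ad->antic_expire.
 */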
#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};
static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn =		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn =	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
		.trim =				as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};
static int __init as_init(void)
{
	elv_register(&iosched_as);

	return 0;
}
static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
}
module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");