#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	atomic_t nr_queued;	/* queued reads & sync writes */
	atomic_t nr_dispatched;	/* number of requests gone to the drivers */

	/* IO History tracking */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	sector_t last_request_pos;
};
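/*
 * Illustrative sketch, not part of the original header: one plausible way a
 * scheduler could fold a new "think time" sample (the gap between a request
 * completing and the next one arriving) into the running statistics above.
 * The helper name and the fixed-point decay constants are assumptions made
 * for illustration only.
 */
static inline void as_ioc_update_ttime_sketch(struct as_io_context *aic,
					      unsigned long ttime)
{
	/* exponentially decay old samples; fixed point with 1.0 == 256 */
	aic->ttime_samples = (7 * aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7 * aic->ttime_total + 256 * ttime) / 8;
	/* ttime_samples is at least 32 after the update above, so this is safe */
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}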
struct cfq_io_context {
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;
};
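/*
 * Illustrative sketch, not part of the original header: the two cfq_queue
 * slots in cfqq[] are conventionally indexed by whether the I/O is
 * synchronous (0 = async, 1 = sync). The accessor below is hypothetical and
 * only demonstrates that indexing convention.
 */
static inline struct cfq_queue *cic_cfqq_sketch(struct cfq_io_context *cic,
						int is_sync)
{
	return cic->cfqq[is_sync ? 1 : 0];
}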
/*
 * I/O subsystem state of the associated processes. It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_t refcount;
	atomic_t nr_tasks;
	/* all the fields below are protected by this lock */
	spinlock_t lock;
	unsigned short ioprio;
	unsigned short ioprio_changed;
	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */
	struct as_io_context *aic;
	struct radix_tree_root radix_root;
	struct hlist_head cic_list;
};
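/*
 * Illustrative sketch, not part of the original header: per-device
 * cfq_io_context entries are reached through radix_root, typically keyed by
 * a pointer-derived value for the device's scheduler data and looked up
 * under rcu_read_lock(). The key choice and helper name are assumptions for
 * illustration only.
 */
static inline struct cfq_io_context *ioc_radix_lookup_sketch(struct io_context *ioc,
							      unsigned long key)
{
	struct cfq_io_context *cic;

	rcu_read_lock();
	cic = radix_tree_lookup(&ioc->radix_root, key);
	rcu_read_unlock();

	return cic;
}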
static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
	 * a race).
	 */
	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
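/*
 * Illustrative sketch, not part of the original header: how a fork-style
 * caller might use ioc_task_link() to share a parent's io_context, treating
 * a NULL return as "the context is dying, set up a fresh one instead". The
 * helper name and the fallback policy are assumptions for illustration.
 */
static inline struct io_context *ioc_try_share_sketch(struct io_context *parent_ioc)
{
	struct io_context *ioc = NULL;

	if (parent_ioc)
		ioc = ioc_task_link(parent_ioc); /* bumps refcount and nr_tasks */

	return ioc; /* NULL: caller must allocate its own io_context */
}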