		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
} ____cacheline_aligned_in_smp;
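
/*
 * blk_mq_ctx is the per-cpu software queue: requests are staged on the
 * submitting CPU's rq_list (under the ctx lock) before being flushed to
 * a hardware queue. The two-element counters are split by request type
 * (in current blk-mq, sync vs. async), and the extra cacheline alignment
 * keeps the completion-side counters off the submission-side line.
 */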

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
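
/*
 * Dispatch plumbing: blk_mq_flush_busy_ctxs() moves pending requests
 * from a hardware queue's busy software queues onto @list, and
 * blk_mq_dispatch_rq_list() then feeds that list to the driver.
 * blk_mq_get_driver_tag() upgrades a scheduler-tagged request to a real
 * driver tag; with wait set, the caller may block until a tag frees up.
 */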

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
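
/*
 * Note the two-step setup: blk_mq_alloc_rq_map() allocates the tags and
 * the (still empty) request pointer map for one hardware context, and
 * blk_mq_alloc_rqs() then populates it with requests up to @depth.
 * Teardown mirrors this via blk_mq_free_rqs() and blk_mq_free_rq_map().
 */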

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
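
/*
 * __blk_mq_insert_request() queues one request on a software queue and
 * expects the caller to hold the ctx lock; blk_mq_insert_requests()
 * splices an entire list (e.g. a plug list) onto one ctx, taking the
 * lock once for the whole batch.
 */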

void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
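
/*
 * q->mq_map[] holds a hardware queue index per CPU, so the lookup above
 * is two array dereferences: cpu -> hw queue index -> hctx. Illustrative
 * layout only: with two hardware queues on a four-CPU machine, mq_map
 * might read {0, 0, 1, 1}, sending CPUs 0-1 to queue_hw_ctx[0] and
 * CPUs 2-3 to queue_hw_ctx[1].
 */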

extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
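
/*
 * The sysfs helpers back the mq/ directory under the block device's queue
 * kobject; __blk_mq_register_dev() does the initial registration, and the
 * register/unregister pair is used when the hardware queue layout changes
 * (a rough summary; blk-mq-sysfs.c is authoritative).
 */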

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
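
/*
 * blk_mq_get_ctx() pins the caller via get_cpu(), so each call must be
 * balanced by blk_mq_put_ctx(). A minimal usage sketch:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	... add to or scan ctx->rq_list ...
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */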

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue	*q;
	unsigned int		flags;
	unsigned int		shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx	*ctx;
	struct blk_mq_hw_ctx	*hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
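
/*
 * BLK_MQ_REQ_INTERNAL marks allocations made on behalf of an I/O
 * scheduler: those requests draw from the per-hctx sched_tags pool
 * instead of the driver-visible tags pool.
 */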

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
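
/*
 * blk_mq_hctx_stopped() tests the BLK_MQ_S_STOPPED bit set by
 * blk_mq_stop_hw_queue(); stopped queues are not run until restarted.
 * A hardware queue counts as mapped only if at least one software queue
 * (nr_ctx) and a tag set were attached during init; unmapped queues are
 * skipped by the queue-running paths.
 */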