#include <linux/blkdev.h>
struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	int			(*notify)(void *data, unsigned long action,
					  unsigned int cpu);
};
struct blk_mq_ctxmap {
	unsigned int		map_size;
	unsigned int		bits_per_word;
	struct blk_align_bitmap	*map;
};
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	struct blk_mq_ctxmap	ctx_map;

	struct blk_mq_ctx	**ctxs;

	struct blk_mq_tags	*tags;

#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
};
struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */

	unsigned int		flags;		/* BLK_MQ_F_* */

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
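/*
 * Example (editorial sketch): plausible probe-time setup with the tag set
 * API above, reusing the hypothetical my_mq_ops from the previous sketch
 * and struct my_cmd from the pdu sketch further down. Only fields shown in
 * this header are filled in.
 */
static struct blk_mq_tag_set my_tag_set;

static struct request_queue *my_create_queue(void)
{
	struct request_queue *q;

	my_tag_set.ops = &my_mq_ops;
	my_tag_set.nr_hw_queues = 1;
	my_tag_set.queue_depth = 64;
	my_tag_set.cmd_size = sizeof(struct my_cmd);	/* per-request pdu */
	my_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(&my_tag_set))
		return NULL;

	q = blk_mq_init_queue(&my_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_tag_set);
		return NULL;
	}
	return q;
}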
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *,
		unsigned int, int);
void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
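/*
 * Example (editorial sketch): a plausible split completion path. The
 * hard-irq handler only hands the finished request to the block layer;
 * blk_mq_complete_request() later invokes the driver's ->complete handler,
 * which ends the I/O. my_dev_pop_completed() and my_cmd_status() are
 * hypothetical; irqreturn_t needs <linux/interrupt.h>.
 */
extern struct request *my_dev_pop_completed(void *data);	/* hypothetical */
extern int my_cmd_status(struct request *rq);			/* hypothetical */

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct request *rq = my_dev_pop_completed(data);

	if (!rq)
		return IRQ_NONE;

	blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}

static void my_complete_rq(struct request *rq)	/* wired up as ops->complete */
{
	blk_mq_end_io(rq, my_cmd_status(rq));
}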
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
		void (*fn)(void *data, unsigned long *), void *data);
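/*
 * Example (editorial sketch): a common out-of-resources pattern -- stop the
 * hw queue before returning BUSY, then restart the stopped queues from the
 * completion side once room frees up. Reuses the hypothetical my_dev
 * helpers from the queue_rq sketch above.
 */
static int my_queue_rq_throttled(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (my_dev_full(dev)) {
		blk_mq_stop_hw_queue(hctx);	/* quiesce until completions */
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	my_dev_submit(dev, rq);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void my_dev_unthrottle(struct request_queue *q)
{
	/* true == run the queues asynchronously from a workqueue */
	blk_mq_start_stopped_hw_queues(q, true);
}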
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
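/*
 * Example (editorial sketch): with cmd_size = sizeof(struct my_cmd) in the
 * tag set, every request is allocated with a driver pdu directly behind it,
 * reachable via the helpers above. struct my_cmd is hypothetical; the
 * init_request hook runs once per preallocated request.
 */
struct my_cmd {
	u32	status;
};

static int my_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;
	return 0;
}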
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
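/*
 * Example (editorial sketch): walking every hardware queue of a
 * request_queue with the iterator above, e.g. for debug output.
 */
static void my_dump_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		pr_info("hctx %u: numa node %u, state %#lx\n",
			i, hctx->numa_node, hctx->state);
}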
#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
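/*
 * Usage note (editorial addition): blk_ctx_sum() evaluates "sum" once per
 * software context, with __x bound to the current blk_mq_ctx, so a
 * hypothetical per-ctx counter field could be totalled like this:
 *
 *	unsigned int merged = blk_ctx_sum(q, __x->rq_merged);
 */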