/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
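
/*
 * Illustrative sketch (not part of this file): a driver's completion
 * handler can map a hardware-reported tag back to the request it
 * belongs to.  The hw_read_completed_tag() helper is hypothetical.
 *
 *      tag = hw_read_completed_tag(hw);
 *      rq = blk_queue_find_tag(q, tag);
 *      if (rq)
 *              (complete rq)
 */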

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
        int retval;

        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                       bqt->max_depth);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }

        return retval;
}
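
/*
 * Worked example of the refcounting above: a tag map shared by two
 * queues starts at refcnt == 2, so the first __blk_free_tags() call
 * only drops the count and returns false; the second caller's
 * atomic_dec_and_test() hits zero and actually frees the map.
 */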

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        __blk_free_tags(bqt);

        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (unlikely(!__blk_free_tags(bqt)))
                BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	the queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __func__, depth);
        }

        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}
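
/*
 * Worked example for the bitmap sizing above: with depth == 70 on a
 * 64-bit machine, ALIGN(70, 64) rounds up to 128, so nr_ulongs == 2
 * and the map covers tags 0..69; the surplus bits simply stay zero.
 */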

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        atomic_set(&tags->refcnt, 1);
        return tags;
fail:
        kfree(tags);
        return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
        return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
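
/*
 * Usage sketch (assumed driver code, not from this file): a host driver
 * that shares one tag space across all of its devices can allocate the
 * map once up front; my_host is a hypothetical per-host structure.
 *
 *      my_host->bqt = blk_init_tags(64);
 *      if (!my_host->bqt)
 *              return -ENOMEM;
 *
 * The driver must release every queue that used the map before dropping
 * the final reference with blk_free_tags(my_host->bqt).
 */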

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth);

                if (!tags)
                        return -ENOMEM;

        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
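
/*
 * Continuing the sketch above (sdev_q and my_host are hypothetical):
 * each device queue then attaches to the shared map, which takes an
 * extra reference instead of allocating a new map.
 *
 *      err = blk_queue_init_tags(sdev_q, 64, my_host->bqt);
 */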

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;

        /*
         * If real_max_depth is already large enough, just adjust
         * max_depth.  *NOTE* as requests with tag values between
         * new_depth and real_max_depth can be in flight, the tag
         * map cannot be shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }

        /*
         * Currently cannot replace a shared tag map with a new
         * one, so error out if this is the case
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
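
/*
 * Example of the resize semantics above: shrinking a depth-64 map to 32
 * only lowers max_depth (real_max_depth stays 64, since tags 32..63 may
 * still be in flight), so a later resize back up to 64 takes the cheap
 * path and reallocates nothing.
 */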

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag = rq->tag;

        BUG_ON(tag == -1);

        if (unlikely(tag >= bqt->real_max_depth))
                /*
                 * This can happen after tag depth has been reduced.
                 * FIXME: how about a warning or info message here?
                 */
                return;

        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __func__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __func__, tag);
                return;
        }

        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
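
/*
 * Completion sketch (illustrative; assumes the queue lock is held and
 * error handling is elided):
 *
 *      rq = blk_queue_find_tag(q, tag);
 *      blk_queue_end_tag(q, rq);
 *      __blk_end_request_all(rq, 0);
 */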

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned max_depth;
        int tag;

        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
                       __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         *
         * We reserve a few tags just for sync IO, since we don't want
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
        if (!rq_is_sync(rq) && max_depth > 1) {
                max_depth -= 2;
                if (!max_depth)
                        max_depth = 1;
                if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }

        do {
                tag = find_first_zero_bit(bqt->tag_map, max_depth);
                if (tag >= max_depth)
                        return 1;

        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
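
/*
 * Submission sketch for an old-style request_fn driver (my_request_fn
 * and hw_submit are hypothetical).  blk_queue_start_tag() itself calls
 * blk_start_request(), so the driver only peeks at the queue head:
 *
 *      static void my_request_fn(struct request_queue *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = blk_peek_request(q)) != NULL) {
 *                      if (blk_queue_start_tag(q, rq))
 *                              break;  (no free tag; retry later)
 *                      hw_submit(rq, rq->tag);
 *              }
 *      }
 */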

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
        struct list_head *tmp, *n;

        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
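
/*
 * A typical caller (an assumption, not shown in this file) is a driver's
 * bus-reset or error-recovery path: with the queue lock held, it calls
 * blk_queue_invalidate_tags(q) so that every tagged-but-unfinished
 * request is requeued and reissued once the hardware is usable again.
 */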