/*
 * block/blk-tag.c
 *
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

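/*
 * A minimal usage sketch for blk_queue_find_tag(), shown as a comment
 * only: a hypothetical driver interrupt handler (not part of this file)
 * reads the tag the hardware reports as completed and looks up the
 * matching request.  hypothetical_dev, read_completed_tag() and
 * complete_rq() are assumptions, not real interfaces.
 *
 *	static irqreturn_t hypothetical_isr(int irq, void *data)
 *	{
 *		struct hypothetical_dev *dev = data;
 *		int tag = read_completed_tag(dev);
 *		struct request *rq;
 *
 *		rq = blk_queue_find_tag(dev->queue, tag);
 *		if (rq)
 *			complete_rq(dev, rq);
 *		return IRQ_HANDLED;
 *	}
 */
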
/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 *	For an externally managed @bqt, frees the map.  Callers of this
 *	function must guarantee to have released all the queues that
 *	might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave the
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

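/*
 * Sketch of sharing one externally managed tag map across the queues of
 * a single controller (hypothetical driver code, not part of this file;
 * hypothetical_host and its fields are assumptions).  The map is created
 * once, handed to each queue via blk_queue_init_tags(), and released with
 * blk_free_tags() only after every queue using it has been torn down.
 *
 *	hypothetical_host->shared_tags = blk_init_tags(64);
 *	if (!hypothetical_host->shared_tags)
 *		return -ENOMEM;
 *
 *	// for each request queue q belonging to the controller:
 *	blk_queue_init_tags(q, 64, hypothetical_host->shared_tags);
 *
 *	// once all of those queues have been cleaned up:
 *	blk_free_tags(hypothetical_host->shared_tags);
 */
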
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);

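/*
 * For a queue with its own private map, a driver would typically pass a
 * NULL @tags and let the map be allocated here.  A hedged sketch
 * (hypothetical initialization code, not part of this file; the depth
 * of 32 is arbitrary):
 *
 *	if (blk_queue_init_tags(q, 32, NULL))
 *		printk(KERN_WARNING "falling back to untagged operation\n");
 */
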
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have large enough real_max_depth.  just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

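/*
 * Sketch of changing the depth at runtime (hypothetical driver code, not
 * part of this file); per the note above, the queue lock is held around
 * the call:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	error = blk_queue_resize_tags(q, new_depth);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */
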
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}

	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

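/*
 * Completion-side sketch (hypothetical driver code, not part of this
 * file), assuming the queue lock is already held: the tag is released
 * before the request is completed, so a request is never back on the
 * free list while its tag is still marked busy.
 *
 *	if (blk_rq_tagged(rq))
 *		blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 */
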
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

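/*
 * Submission-side sketch of how a request_fn might use this helper
 * (hypothetical driver code, not part of this file; issue_rq() is an
 * assumption).  A non-zero return means no tag was available, so the
 * request stays on the queue and the loop simply stops until a
 * completion frees a tag.
 *
 *	while ((rq = blk_peek_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;
 *		spin_unlock_irq(q->queue_lock);
 *		issue_rq(dev, rq);
 *		spin_lock_irq(q->queue_lock);
 *	}
 */
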
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
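
/*
 * Error-recovery sketch (hypothetical driver code, not part of this
 * file): after a controller reset, all tagged requests are pushed back
 * onto the request queue, with the queue lock held, and reissued from
 * scratch.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */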