MOXA linux-2.6.x / linux-2.6.9-uc0 (from sdlinux-moxaart.tgz)
[linux-2.6.9-moxart.git] drivers/mmc/card/queue.c
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#if 0 // mask by Victor Yu. 12-02-2008
#include <linux/freezer.h>
#endif
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
#if 1 // mask by Victor Yu. 12-03-2008
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

#if 0 // mask by Victor Yu. 12-02-2008
	req->cmd_flags |= REQ_DONTPREP;
#else
	req->flags |= REQ_DONTPREP;
#endif

	return BLKPREP_OK;
#else
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

#if 0 // mask by Victor Yu. 12-02--2008
	if (blk_special_request(req)) {
#else
	if (req->flags & REQ_SPECIAL) {
#endif
		/*
		 * Special commands already have the command
		 * blocks already setup in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
#if 0 // mask by Victor Yu. 12-02--2008
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
#else
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
#endif
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK) {
#if 0 // mask by Victor Yu. 12-02--2008
		req->cmd_flags |= REQ_DONTPREP;
#else
		req->flags |= REQ_DONTPREP;
#endif
	}

	return ret;
#endif
}
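
/*
 * Per-card worker thread: repeatedly fetch the next request from the
 * block layer queue and hand it to mq->issue_fn(), going to sleep
 * whenever the queue is empty and being woken again via mmc_request().
 */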
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
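
/*
 * Tear down a queue set up by mmc_init_queue(): detach it from the
 * block layer, stop the worker thread and release the scatterlist
 * and bounce buffer allocations.
 */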
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
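
/*
 * Typical call sequence (illustrative sketch only, not part of this
 * file): the MMC block driver creates one queue per card and drives
 * it through the helpers above. The md/mq names and the issue
 * handler below are hypothetical locals used only for illustration.
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto err;
 *	md->queue.issue_fn = my_issue_rq;	// per-driver request handler
 *	...
 *	mmc_queue_suspend(&md->queue);		// before host suspend
 *	mmc_queue_resume(&md->queue);		// after host resume
 *	...
 *	mmc_cleanup_queue(&md->queue);		// on card removal
 */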

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
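
/*
 * Copy data byte-for-byte from one scatterlist to another, walking
 * both lists element by element. Used below to stage data through
 * the bounce buffer.
 */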
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
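
/*
 * Map the current request into mq->sg. Without a bounce buffer this
 * is a straight blk_rq_map_sg(); with one, the request is mapped into
 * mq->bounce_sg and mq->sg is collapsed to a single entry covering
 * the bounce buffer.
 */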
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
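
/*
 * Before issuing a write through the bounce buffer, gather the
 * request data from the original scatterlist into it.
 */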
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}
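
/*
 * After completing a read through the bounce buffer, scatter the
 * received data back into the request's original pages.
 */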
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}