/*
 *  linux/drivers/mmc/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)
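/*
 * Note: MMC_QUEUE_BOUNCESZ is only the default bounce buffer size
 * (64 KiB). mmc_init_queue() below clamps it to the host's
 * max_req_size and max_seg_size, so the actual allocation is never
 * larger than what the controller can move in one request or segment.
 */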
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
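/*
 * A note on the locking above: thread_sem is held by mmcqd whenever it
 * is running, and released only while the thread sleeps waiting for
 * work. mmc_queue_suspend() takes the semaphore to wait out any request
 * in flight, and mmc_queue_resume() hands it back, so suspend/resume
 * and the worker thread serialise on it.
 */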
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
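/*
 * Rough sketch of the flow, for orientation: the block layer calls
 * mmc_request() with the queue lock held. If the queue still has a
 * driver behind it, we just poke mmcqd; the thread does the actual
 * issuing so that this (atomic) context never blocks on card I/O. If
 * queuedata has been cleared by mmc_cleanup_queue(), there is nobody
 * left to service requests, so they are failed immediately.
 */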
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 free_bounce_buf:
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
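/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * block driver might set up and tear down one of these queues. The
 * example_* names and the example_blk_data layout are hypothetical
 * stand-ins; the real user is the MMC block driver.
 */
#if 0
struct example_blk_data {
	spinlock_t lock;
	struct mmc_queue queue;
};

/* hypothetical request handler supplied by the driver */
static int example_issue_rq(struct mmc_queue *mq, struct request *req);

static int example_attach(struct mmc_card *card, struct example_blk_data *md)
{
	int ret;

	spin_lock_init(&md->lock);

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		return ret;

	/* mmcqd hands each request to the driver through issue_fn */
	md->queue.issue_fn = example_issue_rq;

	return 0;
}

static void example_detach(struct example_blk_data *md)
{
	mmc_cleanup_queue(&md->queue);
}
#endif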
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
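/*
 * Illustrative only: how a driver's power management callbacks might
 * use the pair above. example_blk_data is the same hypothetical
 * structure sketched after mmc_init_queue(); mmc_get_drvdata() is the
 * usual card drvdata accessor.
 */
#if 0
static int example_suspend(struct mmc_card *card, pm_message_t state)
{
	struct example_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_suspend(&md->queue);
	return 0;
}

static int example_resume(struct mmc_card *card)
{
	struct example_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_resume(&md->queue);
	return 0;
}
#endif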
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
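/*
 * Worked example (illustrative numbers): copying a two-entry source
 * list of 3 KiB + 5 KiB into a single 8 KiB destination entry proceeds
 * in two memcpy() chunks. First chunk = min(8K, 3K) = 3 KiB, which
 * exhausts src[0] and advances to src[1]; second chunk = min(5K, 5K) =
 * 5 KiB, which exhausts both lists and ends the loop.
 */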
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
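/*
 * In other words: when bouncing, the request is mapped twice. The real
 * pages land in bounce_sg, while sg[0] is rewritten to describe one
 * contiguous span of bounce_buf whose length is the sum of all
 * bounce_sg entries. The host driver then only ever sees a single
 * segment, which is the whole point for max_hw_segs == 1 controllers.
 */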
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}
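/*
 * The asymmetry above is deliberate: for a WRITE the payload must be
 * staged into bounce_buf before the host transfers it (bounce_pre),
 * while for a READ the data arrives in bounce_buf and is scattered
 * back to the request's real pages afterwards (bounce_post). When the
 * request already maps to a single segment, no copying is needed in
 * either direction.
 */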