/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"
#define MMC_QUEUE_SUSPENDED	(1 << 0)
/*
 * Prepare a MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (blk_special_request(req)) {
		/*
		 * Special commands already have the command
		 * blocks already setup in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}
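/*
 * Note on the BLKPREP_* codes above: BLKPREP_OK tells the block layer
 * the request is ready to be issued, BLKPREP_KILL fails it outright.
 * A media driver's mq->prep_fn follows the same contract.  A minimal,
 * hypothetical sketch (the name my_prep_rq is illustrative only):
 *
 *	static int my_prep_rq(struct mmc_queue *mq, struct request *req)
 *	{
 *		// build the MMC command chain for this request and stash
 *		// it where the matching issue_fn expects to find it
 *		return BLKPREP_OK;
 *	}
 */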
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
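/*
 * Locking note: the worker above holds mq->thread_sem while it is
 * issuing a request and drops it (up/schedule/down) while idle.  That
 * is what lets mmc_queue_suspend() below simply take the semaphore to
 * wait until no request is in flight.
 */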
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	wake_up_process(mq->thread);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup_queue;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
EXPORT_SYMBOL(mmc_init_queue);
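/*
 * A queue is owned by a media driver such as the MMC block driver.  A
 * minimal sketch of a caller, assuming a hypothetical driver-private
 * structure 'md' that embeds a struct mmc_queue and a spinlock (the
 * names below are illustrative, not part of this file):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		return ret;
 *	md->queue.prep_fn = my_prep_rq;		// used by mmc_prep_request()
 *	md->queue.issue_fn = my_issue_rq;	// called from mmc_queue_thread()
 *
 * with a matching mmc_cleanup_queue(&md->queue) on removal.
 */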
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
EXPORT_SYMBOL(mmc_queue_suspend);
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(mmc_queue_resume);
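/*
 * Suspend and resume are intended to be used as a pair around host
 * power management.  A hedged, illustrative example (hypothetical
 * driver data 'md', not part of this file): the media driver calls
 *
 *	mmc_queue_suspend(&md->queue);	// before the host is powered down
 *
 * from its suspend callback, and
 *
 *	mmc_queue_resume(&md->queue);	// once the card is usable again
 *
 * from its resume callback.
 */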