/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (blk_special_request(req)) {
		/*
		 * Special commands already have the command
		 * blocks already setup in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}
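
/*
 * Example (illustrative only, excluded from the build): a minimal sketch
 * of the prep_fn hook that mmc_prep_request() hands block I/O requests to.
 * The function name mmc_blk_prep_rq() and the check performed here are
 * assumptions made for the sketch, not the actual media driver code.
 */
#if 0
static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
{
	/*
	 * Reject anything the media driver cannot service; accepted
	 * requests are later picked up by mq->issue_fn from the queue
	 * thread.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req))
		return BLKPREP_KILL;

	return BLKPREP_OK;
}
#endif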

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
EXPORT_SYMBOL(mmc_init_queue);
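
/*
 * Example (illustrative only, excluded from the build): how a media
 * driver might set up a queue with mmc_init_queue() and point it at its
 * own prep/issue hooks.  The struct mmc_blk_data layout and the hook
 * names mmc_blk_prep_rq()/mmc_blk_issue_rq() are assumptions made for
 * this sketch; only mmc_init_queue() and the mmc_queue fields used here
 * come from this file and mmc_queue.h.
 */
#if 0
struct mmc_blk_data {
	spinlock_t		lock;
	struct mmc_queue	queue;
};

static int mmc_blk_setup_queue(struct mmc_blk_data *md, struct mmc_card *card)
{
	int ret;

	spin_lock_init(&md->lock);

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		return ret;

	/* Hooks called from mmc_prep_request() and mmc_queue_thread(). */
	md->queue.prep_fn = mmc_blk_prep_rq;
	md->queue.issue_fn = mmc_blk_issue_rq;

	return 0;
}
#endif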

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(mmc_queue_resume);
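
/*
 * Example (illustrative only, excluded from the build): pairing
 * mmc_queue_suspend() and mmc_queue_resume() around a power-management
 * transition.  The mmc_blk_suspend()/mmc_blk_resume() names and the
 * mmc_blk_data container are assumptions made for this sketch.
 */
#if 0
static int mmc_blk_suspend(struct mmc_blk_data *md)
{
	/* Quiesce the block queue and wait for the worker thread. */
	mmc_queue_suspend(&md->queue);
	return 0;
}

static int mmc_blk_resume(struct mmc_blk_data *md)
{
	/* Restart the queue and let the worker thread run again. */
	mmc_queue_resume(&md->queue);
	return 0;
}
#endif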