/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sd(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sdio(struct mmc_host *host, u32 ocr);

static struct workqueue_struct *workqueue;
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
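
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * how a host controller driver might hand a finished request back to the
 * core, e.g. from its interrupt handler. The foo_* names and the mrq/mmc
 * fields of struct foo_host are hypothetical. On failure the driver would
 * set cmd->error to a negative errno before calling mmc_request_done().
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = 0;
 *		mmc_request_done(foo->mmc, mrq);
 *
 *		return IRQ_HANDLED;
 *	}
 */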
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for (i = 0; i < mrq->data->sg_len; i++)
			sz += mrq->data->sg[i].length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	host->ops->request(host, mrq);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}

/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);
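
/*
 * Editor's illustration (not part of the original file): a hedged sketch of a
 * caller building a complete request for a single-block read, roughly what
 * the block layer glue does. "card", "sg" and "block_addr" are assumed to be
 * set up by the surrounding code; error handling is omitted.
 *
 *	struct mmc_request mrq;
 *	struct mmc_command cmd;
 *	struct mmc_data data;
 *
 *	memset(&mrq, 0, sizeof(mrq));
 *	memset(&cmd, 0, sizeof(cmd));
 *	memset(&data, 0, sizeof(data));
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = block_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_wait_for_req(host, &mrq);
 */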
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
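
/*
 * Editor's illustration (not part of the original file): issuing a bare
 * command with the helper above, assuming the caller has already claimed the
 * host and that "card", "err" and "status" exist in the surrounding code.
 *
 *	struct mmc_command cmd;
 *
 *	memset(&cmd, 0, sizeof(struct mmc_command));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */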
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			limit_us = 250000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
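
/*
 * Editor's illustration (not part of the original file): a worked example of
 * the calculation above for a hypothetical standard-capacity SD card read
 * with csd.tacc_ns = 1500000 (1.5 ms) and csd.tacc_clks = 0:
 *
 *	mult       = 100                          (SD card, read)
 *	timeout_ns = 1500000 * 100 = 150000000 ns (150 ms)
 *	timeout_us = 150000 us
 *
 * 150000 us exceeds the 100000 us read limit, so the result is clamped to
 * timeout_ns = 100000000 and timeout_clks = 0.
 */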
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations. If @abort is non-null and
 *	dereferences to a non-zero value then this will return prematurely
 *	with that non-zero value without acquiring the lock. Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop)
		host->claimed = 1;
	else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	host->claimed = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	wake_up(&host->wq);
}

EXPORT_SYMBOL(mmc_release_host);
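
/*
 * Editor's illustration (not part of the original file): the claim/release
 * pairing expected around any use of the request helpers above.
 * mmc_claim_host() is assumed to be the usual wrapper that calls
 * __mmc_claim_host(host, NULL).
 *
 *	mmc_claim_host(host);
 *	err = mmc_wait_for_cmd(host, &cmd, 0);
 *	mmc_release_host(host);
 */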
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
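
/*
 * Editor's illustration (not part of the original file): a rough sketch of
 * the receiving end of this call, i.e. a hypothetical host driver's set_ios
 * callback applying the settings to its hardware. All foo_* names are
 * invented for the example.
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *foo = mmc_priv(mmc);
 *
 *		foo_update_clock(foo, ios->clock);
 *		foo_update_bus_width(foo, ios->bus_width);
 *		if (ios->power_mode == MMC_POWER_OFF)
 *			foo_power_down(foo);
 *	}
 */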
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;
	bit = ffs(ocr);
	if (bit) {
		bit -= 1;
		ocr &= 3 << bit;
		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else
		ocr = 0;

	return ocr;
}
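
/*
 * Editor's illustration (not part of the original file): a worked example of
 * the selection above. Suppose the card's OCR overlaps the host and the
 * masked value has MMC_VDD_32_33 | MMC_VDD_33_34 set (bits 20 and 21).
 * ffs() then returns 21, so bit = 20, the OCR is narrowed to bits 20-21,
 * and host->ios.vdd is set to 20, i.e. the lowest supported voltage range.
 */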
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Give the supply a moment to stabilise before enabling the clock
	 * and switching to full power.
	 */
	mmc_delay(1);

	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	mmc_delay(2);
}
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
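
/*
 * Editor's illustration (not part of the original file): how a card-type
 * attach routine might register its bus handler. The callbacks shown match
 * the mmc_bus_ops members used elsewhere in this file (.detect, .remove,
 * .suspend, .resume); the mmc_sd_* functions are placeholders.
 *
 *	static const struct mmc_bus_ops mmc_sd_ops = {
 *		.remove		= mmc_sd_remove,
 *		.detect		= mmc_sd_detect,
 *		.suspend	= mmc_sd_suspend,
 *		.resume		= mmc_sd_resume,
 *	};
 *
 *	mmc_attach_bus(host, &mmc_sd_ops);
 */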
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
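
/*
 * Editor's illustration (not part of the original file): a typical caller is
 * a host driver's card-detect interrupt handler, debouncing the socket by
 * passing a small delay in jiffies. The foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *
 *		mmc_detect_change(foo->mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */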
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;

	mmc_bus_get(host);

	if (host->bus_ops == NULL) {
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		/*
		 * First we search for SDIO...
		 */
		err = mmc_send_io_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sdio(host, ocr))
				mmc_power_off(host);
			return;
		}

		/*
		 * ...then normal SD...
		 */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			return;
		}

		/*
		 * ...and finally MMC.
		 */
		err = mmc_send_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_mmc(host, ocr))
				mmc_power_off(host);
			return;
		}

		mmc_release_host(host);
		mmc_power_off(host);
	} else {
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
	}
}
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
	}
	mmc_bus_put(host);

	mmc_power_off(host);
}
/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 *	@state: suspend mode (PM_SUSPEND_xxx)
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			host->bus_ops->suspend(host);
		if (!host->bus_ops->resume) {
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
	}
	mmc_bus_put(host);

	mmc_power_off(host);

	return 0;
}

EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}
	mmc_bus_put(host);

	/*
	 * We add a slight delay here so that resume can progress
	 * in parallel.
	 */
	mmc_detect_change(host, 1);

	return 0;
}

EXPORT_SYMBOL(mmc_resume_host);
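
/*
 * Editor's illustration (not part of the original file): a sketch of how a
 * platform host driver's power-management hooks might route into the two
 * helpers above. The foo_* names and keeping the mmc_host pointer in drvdata
 * are assumptions for the example.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_suspend_host(mmc, state);
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */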
static int __init mmc_init(void)
{
	int ret;

	workqueue = create_singlethread_workqueue("kmmcd");
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");