2 * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
4 * Copyright (C) 2007 Google Inc,
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
13 * Author: San Mehat (san@android.com)
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/init.h>
20 #include <linux/ioport.h>
21 #include <linux/device.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/highmem.h>
26 #include <linux/log2.h>
27 #include <linux/mmc/host.h>
28 #include <linux/mmc/card.h>
29 #include <linux/clk.h>
30 #include <linux/scatterlist.h>
31 #include <linux/platform_device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/debugfs.h>
35 #include <linux/memory.h>
37 #include <asm/cacheflush.h>
38 #include <asm/div64.h>
39 #include <asm/sizes.h>
41 #include <asm/mach/mmc.h>
42 #include <mach/msm_iomap.h>
44 #include <mach/htc_pwrsink.h>
48 #define DRIVER_NAME "msm-sdcc"
/* Module tunables: default clock range, bus width, and feature switches. */
static unsigned int msmsdcc_fmin = 144000;	/* minimum card clock, Hz */
static unsigned int msmsdcc_fmax = 50000000;	/* maximum card clock, Hz */
static unsigned int msmsdcc_4bit = 1;		/* advertise 4-bit data bus */
static unsigned int msmsdcc_pwrsave = 1;	/* gate clock when bus idle */
static unsigned int msmsdcc_piopoll = 1;	/* spin briefly in PIO irq */
static unsigned int msmsdcc_sdioirq;		/* SDIO card interrupt support */

/* Spin limits for busy-wait polling (iterations, not time units). */
#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
61 msmsdcc_start_command(struct msmsdcc_host
*host
, struct mmc_command
*cmd
,
65 msmsdcc_request_end(struct msmsdcc_host
*host
, struct mmc_request
*mrq
)
67 writel(0, host
->base
+ MMCICOMMAND
);
69 BUG_ON(host
->curr
.data
);
71 host
->curr
.mrq
= NULL
;
72 host
->curr
.cmd
= NULL
;
75 mrq
->data
->bytes_xfered
= host
->curr
.data_xfered
;
76 if (mrq
->cmd
->error
== -ETIMEDOUT
)
80 * Need to drop the host lock here; mmc_request_done may call
81 * back into the driver...
83 spin_unlock(&host
->lock
);
84 mmc_request_done(host
->mmc
, mrq
);
85 spin_lock(&host
->lock
);
89 msmsdcc_stop_data(struct msmsdcc_host
*host
)
91 writel(0, host
->base
+ MMCIDATACTRL
);
92 host
->curr
.data
= NULL
;
93 host
->curr
.got_dataend
= host
->curr
.got_datablkend
= 0;
96 uint32_t msmsdcc_fifo_addr(struct msmsdcc_host
*host
)
98 switch (host
->pdev_id
) {
100 return MSM_SDC1_PHYS
+ MMCIFIFO
;
102 return MSM_SDC2_PHYS
+ MMCIFIFO
;
104 return MSM_SDC3_PHYS
+ MMCIFIFO
;
106 return MSM_SDC4_PHYS
+ MMCIFIFO
;
113 msmsdcc_dma_complete_func(struct msm_dmov_cmd
*cmd
,
115 struct msm_dmov_errdata
*err
)
117 struct msmsdcc_dma_data
*dma_data
=
118 container_of(cmd
, struct msmsdcc_dma_data
, hdr
);
119 struct msmsdcc_host
*host
= dma_data
->host
;
121 struct mmc_request
*mrq
;
123 spin_lock_irqsave(&host
->lock
, flags
);
124 mrq
= host
->curr
.mrq
;
127 if (!(result
& DMOV_RSLT_VALID
)) {
128 pr_err("msmsdcc: Invalid DataMover result\n");
132 if (result
& DMOV_RSLT_DONE
) {
133 host
->curr
.data_xfered
= host
->curr
.xfer_size
;
136 if (result
& DMOV_RSLT_ERROR
)
137 pr_err("%s: DMA error (0x%.8x)\n",
138 mmc_hostname(host
->mmc
), result
);
139 if (result
& DMOV_RSLT_FLUSH
)
140 pr_err("%s: DMA channel flushed (0x%.8x)\n",
141 mmc_hostname(host
->mmc
), result
);
143 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
144 err
->flush
[0], err
->flush
[1], err
->flush
[2],
145 err
->flush
[3], err
->flush
[4], err
->flush
[5]);
146 if (!mrq
->data
->error
)
147 mrq
->data
->error
= -EIO
;
150 dma_unmap_sg(mmc_dev(host
->mmc
), host
->dma
.sg
, host
->dma
.num_ents
,
153 if (host
->curr
.user_pages
) {
154 struct scatterlist
*sg
= host
->dma
.sg
;
157 for (i
= 0; i
< host
->dma
.num_ents
; i
++)
158 flush_dcache_page(sg_page(sg
++));
163 if ((host
->curr
.got_dataend
&& host
->curr
.got_datablkend
)
164 || mrq
->data
->error
) {
167 * If we've already gotten our DATAEND / DATABLKEND
168 * for this request, then complete it through here.
170 msmsdcc_stop_data(host
);
172 if (!mrq
->data
->error
)
173 host
->curr
.data_xfered
= host
->curr
.xfer_size
;
174 if (!mrq
->data
->stop
|| mrq
->cmd
->error
) {
175 writel(0, host
->base
+ MMCICOMMAND
);
176 host
->curr
.mrq
= NULL
;
177 host
->curr
.cmd
= NULL
;
178 mrq
->data
->bytes_xfered
= host
->curr
.data_xfered
;
180 spin_unlock_irqrestore(&host
->lock
, flags
);
181 mmc_request_done(host
->mmc
, mrq
);
184 msmsdcc_start_command(host
, mrq
->data
->stop
, 0);
188 spin_unlock_irqrestore(&host
->lock
, flags
);
192 static int validate_dma(struct msmsdcc_host
*host
, struct mmc_data
*data
)
194 if (host
->dma
.channel
== -1)
197 if ((data
->blksz
* data
->blocks
) < MCI_FIFOSIZE
)
199 if ((data
->blksz
* data
->blocks
) % MCI_FIFOSIZE
)
204 static int msmsdcc_config_dma(struct msmsdcc_host
*host
, struct mmc_data
*data
)
206 struct msmsdcc_nc_dmadata
*nc
;
212 struct scatterlist
*sg
= data
->sg
;
214 rc
= validate_dma(host
, data
);
218 host
->dma
.sg
= data
->sg
;
219 host
->dma
.num_ents
= data
->sg_len
;
223 switch (host
->pdev_id
) {
225 crci
= MSMSDCC_CRCI_SDC1
;
228 crci
= MSMSDCC_CRCI_SDC2
;
231 crci
= MSMSDCC_CRCI_SDC3
;
234 crci
= MSMSDCC_CRCI_SDC4
;
238 host
->dma
.num_ents
= 0;
242 if (data
->flags
& MMC_DATA_READ
)
243 host
->dma
.dir
= DMA_FROM_DEVICE
;
245 host
->dma
.dir
= DMA_TO_DEVICE
;
247 host
->curr
.user_pages
= 0;
249 n
= dma_map_sg(mmc_dev(host
->mmc
), host
->dma
.sg
,
250 host
->dma
.num_ents
, host
->dma
.dir
);
252 if (n
!= host
->dma
.num_ents
) {
253 pr_err("%s: Unable to map in all sg elements\n",
254 mmc_hostname(host
->mmc
));
256 host
->dma
.num_ents
= 0;
261 for (i
= 0; i
< host
->dma
.num_ents
; i
++) {
262 box
->cmd
= CMD_MODE_BOX
;
264 if (i
== (host
->dma
.num_ents
- 1))
266 rows
= (sg_dma_len(sg
) % MCI_FIFOSIZE
) ?
267 (sg_dma_len(sg
) / MCI_FIFOSIZE
) + 1 :
268 (sg_dma_len(sg
) / MCI_FIFOSIZE
) ;
270 if (data
->flags
& MMC_DATA_READ
) {
271 box
->src_row_addr
= msmsdcc_fifo_addr(host
);
272 box
->dst_row_addr
= sg_dma_address(sg
);
274 box
->src_dst_len
= (MCI_FIFOSIZE
<< 16) |
276 box
->row_offset
= MCI_FIFOSIZE
;
278 box
->num_rows
= rows
* ((1 << 16) + 1);
279 box
->cmd
|= CMD_SRC_CRCI(crci
);
281 box
->src_row_addr
= sg_dma_address(sg
);
282 box
->dst_row_addr
= msmsdcc_fifo_addr(host
);
284 box
->src_dst_len
= (MCI_FIFOSIZE
<< 16) |
286 box
->row_offset
= (MCI_FIFOSIZE
<< 16);
288 box
->num_rows
= rows
* ((1 << 16) + 1);
289 box
->cmd
|= CMD_DST_CRCI(crci
);
295 /* location of command block must be 64 bit aligned */
296 BUG_ON(host
->dma
.cmd_busaddr
& 0x07);
298 nc
->cmdptr
= (host
->dma
.cmd_busaddr
>> 3) | CMD_PTR_LP
;
299 host
->dma
.hdr
.cmdptr
= DMOV_CMD_PTR_LIST
|
300 DMOV_CMD_ADDR(host
->dma
.cmdptr_busaddr
);
301 host
->dma
.hdr
.complete_func
= msmsdcc_dma_complete_func
;
307 msmsdcc_start_data(struct msmsdcc_host
*host
, struct mmc_data
*data
)
309 unsigned int datactrl
, timeout
;
310 unsigned long long clks
;
311 void __iomem
*base
= host
->base
;
312 unsigned int pio_irqmask
= 0;
314 host
->curr
.data
= data
;
315 host
->curr
.xfer_size
= data
->blksz
* data
->blocks
;
316 host
->curr
.xfer_remain
= host
->curr
.xfer_size
;
317 host
->curr
.data_xfered
= 0;
318 host
->curr
.got_dataend
= 0;
319 host
->curr
.got_datablkend
= 0;
321 memset(&host
->pio
, 0, sizeof(host
->pio
));
323 clks
= (unsigned long long)data
->timeout_ns
* host
->clk_rate
;
324 do_div(clks
, NSEC_PER_SEC
);
325 timeout
= data
->timeout_clks
+ (unsigned int)clks
;
326 writel(timeout
, base
+ MMCIDATATIMER
);
328 writel(host
->curr
.xfer_size
, base
+ MMCIDATALENGTH
);
330 datactrl
= MCI_DPSM_ENABLE
| (data
->blksz
<< 4);
332 if (!msmsdcc_config_dma(host
, data
))
333 datactrl
|= MCI_DPSM_DMAENABLE
;
335 host
->pio
.sg
= data
->sg
;
336 host
->pio
.sg_len
= data
->sg_len
;
337 host
->pio
.sg_off
= 0;
339 if (data
->flags
& MMC_DATA_READ
) {
340 pio_irqmask
= MCI_RXFIFOHALFFULLMASK
;
341 if (host
->curr
.xfer_remain
< MCI_FIFOSIZE
)
342 pio_irqmask
|= MCI_RXDATAAVLBLMASK
;
344 pio_irqmask
= MCI_TXFIFOHALFEMPTYMASK
;
347 if (data
->flags
& MMC_DATA_READ
)
348 datactrl
|= MCI_DPSM_DIRECTION
;
350 writel(pio_irqmask
, base
+ MMCIMASK1
);
351 writel(datactrl
, base
+ MMCIDATACTRL
);
353 if (datactrl
& MCI_DPSM_DMAENABLE
) {
355 msm_dmov_enqueue_cmd(host
->dma
.channel
, &host
->dma
.hdr
);
360 msmsdcc_start_command(struct msmsdcc_host
*host
, struct mmc_command
*cmd
, u32 c
)
362 void __iomem
*base
= host
->base
;
364 if (readl(base
+ MMCICOMMAND
) & MCI_CPSM_ENABLE
) {
365 writel(0, base
+ MMCICOMMAND
);
366 udelay(2 + ((5 * 1000000) / host
->clk_rate
));
369 c
|= cmd
->opcode
| MCI_CPSM_ENABLE
;
371 if (cmd
->flags
& MMC_RSP_PRESENT
) {
372 if (cmd
->flags
& MMC_RSP_136
)
373 c
|= MCI_CPSM_LONGRSP
;
374 c
|= MCI_CPSM_RESPONSE
;
377 if (cmd
->opcode
== 17 || cmd
->opcode
== 18 ||
378 cmd
->opcode
== 24 || cmd
->opcode
== 25 ||
380 c
|= MCI_CSPM_DATCMD
;
382 if (cmd
== cmd
->mrq
->stop
)
383 c
|= MCI_CSPM_MCIABORT
;
385 host
->curr
.cmd
= cmd
;
389 writel(cmd
->arg
, base
+ MMCIARGUMENT
);
390 writel(c
, base
+ MMCICOMMAND
);
394 msmsdcc_data_err(struct msmsdcc_host
*host
, struct mmc_data
*data
,
397 if (status
& MCI_DATACRCFAIL
) {
398 pr_err("%s: Data CRC error\n", mmc_hostname(host
->mmc
));
399 pr_err("%s: opcode 0x%.8x\n", __func__
,
400 data
->mrq
->cmd
->opcode
);
401 pr_err("%s: blksz %d, blocks %d\n", __func__
,
402 data
->blksz
, data
->blocks
);
403 data
->error
= -EILSEQ
;
404 } else if (status
& MCI_DATATIMEOUT
) {
405 pr_err("%s: Data timeout\n", mmc_hostname(host
->mmc
));
406 data
->error
= -ETIMEDOUT
;
407 } else if (status
& MCI_RXOVERRUN
) {
408 pr_err("%s: RX overrun\n", mmc_hostname(host
->mmc
));
410 } else if (status
& MCI_TXUNDERRUN
) {
411 pr_err("%s: TX underrun\n", mmc_hostname(host
->mmc
));
414 pr_err("%s: Unknown error (0x%.8x)\n",
415 mmc_hostname(host
->mmc
), status
);
422 msmsdcc_pio_read(struct msmsdcc_host
*host
, char *buffer
, unsigned int remain
)
424 void __iomem
*base
= host
->base
;
425 uint32_t *ptr
= (uint32_t *) buffer
;
428 while (readl(base
+ MMCISTATUS
) & MCI_RXDATAAVLBL
) {
430 *ptr
= readl(base
+ MMCIFIFO
+ (count
% MCI_FIFOSIZE
));
432 count
+= sizeof(uint32_t);
434 remain
-= sizeof(uint32_t);
442 msmsdcc_pio_write(struct msmsdcc_host
*host
, char *buffer
,
443 unsigned int remain
, u32 status
)
445 void __iomem
*base
= host
->base
;
449 unsigned int count
, maxcnt
;
451 maxcnt
= status
& MCI_TXFIFOEMPTY
? MCI_FIFOSIZE
:
453 count
= min(remain
, maxcnt
);
455 writesl(base
+ MMCIFIFO
, ptr
, count
>> 2);
462 status
= readl(base
+ MMCISTATUS
);
463 } while (status
& MCI_TXFIFOHALFEMPTY
);
469 msmsdcc_spin_on_status(struct msmsdcc_host
*host
, uint32_t mask
, int maxspin
)
472 if ((readl(host
->base
+ MMCISTATUS
) & mask
))
481 msmsdcc_pio_irq(int irq
, void *dev_id
)
483 struct msmsdcc_host
*host
= dev_id
;
484 void __iomem
*base
= host
->base
;
487 status
= readl(base
+ MMCISTATUS
);
491 unsigned int remain
, len
;
494 if (!(status
& (MCI_TXFIFOHALFEMPTY
| MCI_RXDATAAVLBL
))) {
495 if (host
->curr
.xfer_remain
== 0 || !msmsdcc_piopoll
)
498 if (msmsdcc_spin_on_status(host
,
499 (MCI_TXFIFOHALFEMPTY
|
506 /* Map the current scatter buffer */
507 local_irq_save(flags
);
508 buffer
= kmap_atomic(sg_page(host
->pio
.sg
),
509 KM_BIO_SRC_IRQ
) + host
->pio
.sg
->offset
;
510 buffer
+= host
->pio
.sg_off
;
511 remain
= host
->pio
.sg
->length
- host
->pio
.sg_off
;
513 if (status
& MCI_RXACTIVE
)
514 len
= msmsdcc_pio_read(host
, buffer
, remain
);
515 if (status
& MCI_TXACTIVE
)
516 len
= msmsdcc_pio_write(host
, buffer
, remain
, status
);
518 /* Unmap the buffer */
519 kunmap_atomic(buffer
, KM_BIO_SRC_IRQ
);
520 local_irq_restore(flags
);
522 host
->pio
.sg_off
+= len
;
523 host
->curr
.xfer_remain
-= len
;
524 host
->curr
.data_xfered
+= len
;
528 /* This sg page is full - do some housekeeping */
529 if (status
& MCI_RXACTIVE
&& host
->curr
.user_pages
)
530 flush_dcache_page(sg_page(host
->pio
.sg
));
532 if (!--host
->pio
.sg_len
) {
533 memset(&host
->pio
, 0, sizeof(host
->pio
));
537 /* Advance to next sg */
539 host
->pio
.sg_off
= 0;
542 status
= readl(base
+ MMCISTATUS
);
545 if (status
& MCI_RXACTIVE
&& host
->curr
.xfer_remain
< MCI_FIFOSIZE
)
546 writel(MCI_RXDATAAVLBLMASK
, base
+ MMCIMASK1
);
548 if (!host
->curr
.xfer_remain
)
549 writel(0, base
+ MMCIMASK1
);
554 static void msmsdcc_do_cmdirq(struct msmsdcc_host
*host
, uint32_t status
)
556 struct mmc_command
*cmd
= host
->curr
.cmd
;
557 void __iomem
*base
= host
->base
;
559 host
->curr
.cmd
= NULL
;
560 cmd
->resp
[0] = readl(base
+ MMCIRESPONSE0
);
561 cmd
->resp
[1] = readl(base
+ MMCIRESPONSE1
);
562 cmd
->resp
[2] = readl(base
+ MMCIRESPONSE2
);
563 cmd
->resp
[3] = readl(base
+ MMCIRESPONSE3
);
565 del_timer(&host
->command_timer
);
566 if (status
& MCI_CMDTIMEOUT
) {
567 cmd
->error
= -ETIMEDOUT
;
568 } else if (status
& MCI_CMDCRCFAIL
&&
569 cmd
->flags
& MMC_RSP_CRC
) {
570 pr_err("%s: Command CRC error\n", mmc_hostname(host
->mmc
));
571 cmd
->error
= -EILSEQ
;
574 if (!cmd
->data
|| cmd
->error
) {
575 if (host
->curr
.data
&& host
->dma
.sg
)
576 msm_dmov_stop_cmd(host
->dma
.channel
,
578 else if (host
->curr
.data
) { /* Non DMA */
579 msmsdcc_stop_data(host
);
580 msmsdcc_request_end(host
, cmd
->mrq
);
581 } else /* host->data == NULL */
582 msmsdcc_request_end(host
, cmd
->mrq
);
583 } else if (!(cmd
->data
->flags
& MMC_DATA_READ
))
584 msmsdcc_start_data(host
, cmd
->data
);
588 msmsdcc_irq(int irq
, void *dev_id
)
590 struct msmsdcc_host
*host
= dev_id
;
591 void __iomem
*base
= host
->base
;
596 spin_lock(&host
->lock
);
599 struct mmc_data
*data
;
600 status
= readl(base
+ MMCISTATUS
);
602 status
&= (readl(base
+ MMCIMASK0
) |
603 MCI_DATABLOCKENDMASK
);
604 writel(status
, base
+ MMCICLEAR
);
606 data
= host
->curr
.data
;
608 /* Check for data errors */
609 if (status
& (MCI_DATACRCFAIL
|MCI_DATATIMEOUT
|
610 MCI_TXUNDERRUN
|MCI_RXOVERRUN
)) {
611 msmsdcc_data_err(host
, data
, status
);
612 host
->curr
.data_xfered
= 0;
614 msm_dmov_stop_cmd(host
->dma
.channel
,
617 msmsdcc_stop_data(host
);
619 msmsdcc_request_end(host
,
622 msmsdcc_start_command(host
,
628 /* Check for data done */
629 if (!host
->curr
.got_dataend
&& (status
& MCI_DATAEND
))
630 host
->curr
.got_dataend
= 1;
632 if (!host
->curr
.got_datablkend
&&
633 (status
& MCI_DATABLOCKEND
)) {
634 host
->curr
.got_datablkend
= 1;
637 if (host
->curr
.got_dataend
&&
638 host
->curr
.got_datablkend
) {
640 * If DMA is still in progress, we complete
641 * via the completion handler
643 if (!host
->dma
.busy
) {
645 * There appears to be an issue in the
646 * controller where if you request a
647 * small block transfer (< fifo size),
648 * you may get your DATAEND/DATABLKEND
649 * irq without the PIO data irq.
651 * Check to see if theres still data
652 * to be read, and simulate a PIO irq.
654 if (readl(base
+ MMCISTATUS
) &
656 msmsdcc_pio_irq(1, host
);
658 msmsdcc_stop_data(host
);
660 host
->curr
.data_xfered
=
661 host
->curr
.xfer_size
;
664 msmsdcc_request_end(host
,
667 msmsdcc_start_command(host
,
673 if (status
& (MCI_CMDSENT
| MCI_CMDRESPEND
| MCI_CMDCRCFAIL
|
674 MCI_CMDTIMEOUT
) && host
->curr
.cmd
) {
675 msmsdcc_do_cmdirq(host
, status
);
678 if (status
& MCI_SDIOINTOPER
) {
680 status
&= ~MCI_SDIOINTOPER
;
685 spin_unlock(&host
->lock
);
688 * We have to delay handling the card interrupt as it calls
689 * back into the driver.
692 mmc_signal_sdio_irq(host
->mmc
);
694 return IRQ_RETVAL(ret
);
698 msmsdcc_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
700 struct msmsdcc_host
*host
= mmc_priv(mmc
);
703 WARN_ON(host
->curr
.mrq
!= NULL
);
704 WARN_ON(host
->pwr
== 0);
706 spin_lock_irqsave(&host
->lock
, flags
);
711 if (mrq
->data
&& !(mrq
->data
->flags
& MMC_DATA_READ
)) {
713 mrq
->data
->bytes_xfered
= mrq
->data
->blksz
*
716 mrq
->cmd
->error
= -ENOMEDIUM
;
718 spin_unlock_irqrestore(&host
->lock
, flags
);
719 mmc_request_done(mmc
, mrq
);
723 host
->curr
.mrq
= mrq
;
725 if (mrq
->data
&& mrq
->data
->flags
& MMC_DATA_READ
)
726 msmsdcc_start_data(host
, mrq
->data
);
728 msmsdcc_start_command(host
, mrq
->cmd
, 0);
730 if (host
->cmdpoll
&& !msmsdcc_spin_on_status(host
,
731 MCI_CMDRESPEND
|MCI_CMDCRCFAIL
|MCI_CMDTIMEOUT
,
733 uint32_t status
= readl(host
->base
+ MMCISTATUS
);
734 msmsdcc_do_cmdirq(host
, status
);
735 writel(MCI_CMDRESPEND
| MCI_CMDCRCFAIL
| MCI_CMDTIMEOUT
,
736 host
->base
+ MMCICLEAR
);
737 host
->stats
.cmdpoll_hits
++;
739 host
->stats
.cmdpoll_misses
++;
740 mod_timer(&host
->command_timer
, jiffies
+ HZ
);
742 spin_unlock_irqrestore(&host
->lock
, flags
);
746 msmsdcc_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
748 struct msmsdcc_host
*host
= mmc_priv(mmc
);
749 u32 clk
= 0, pwr
= 0;
754 if (!host
->clks_on
) {
755 clk_enable(host
->pclk
);
756 clk_enable(host
->clk
);
759 if (ios
->clock
!= host
->clk_rate
) {
760 rc
= clk_set_rate(host
->clk
, ios
->clock
);
762 pr_err("%s: Error setting clock rate (%d)\n",
763 mmc_hostname(host
->mmc
), rc
);
765 host
->clk_rate
= ios
->clock
;
767 clk
|= MCI_CLK_ENABLE
;
770 if (ios
->bus_width
== MMC_BUS_WIDTH_4
)
771 clk
|= (2 << 10); /* Set WIDEBUS */
773 if (ios
->clock
> 400000 && msmsdcc_pwrsave
)
774 clk
|= (1 << 9); /* PWRSAVE */
776 clk
|= (1 << 12); /* FLOW_ENA */
777 clk
|= (1 << 15); /* feedback clock */
779 if (host
->plat
->translate_vdd
)
780 pwr
|= host
->plat
->translate_vdd(mmc_dev(mmc
), ios
->vdd
);
782 switch (ios
->power_mode
) {
784 htc_pwrsink_set(PWRSINK_SDCARD
, 0);
790 htc_pwrsink_set(PWRSINK_SDCARD
, 100);
795 if (ios
->bus_mode
== MMC_BUSMODE_OPENDRAIN
)
798 writel(clk
, host
->base
+ MMCICLOCK
);
800 if (host
->pwr
!= pwr
) {
802 writel(pwr
, host
->base
+ MMCIPOWER
);
805 if (!(clk
& MCI_CLK_ENABLE
) && host
->clks_on
) {
806 clk_disable(host
->clk
);
807 clk_disable(host
->pclk
);
812 static void msmsdcc_enable_sdio_irq(struct mmc_host
*mmc
, int enable
)
814 struct msmsdcc_host
*host
= mmc_priv(mmc
);
818 spin_lock_irqsave(&host
->lock
, flags
);
819 if (msmsdcc_sdioirq
== 1) {
820 status
= readl(host
->base
+ MMCIMASK0
);
822 status
|= MCI_SDIOINTOPERMASK
;
824 status
&= ~MCI_SDIOINTOPERMASK
;
825 host
->saved_irq0mask
= status
;
826 writel(status
, host
->base
+ MMCIMASK0
);
828 spin_unlock_irqrestore(&host
->lock
, flags
);
831 static const struct mmc_host_ops msmsdcc_ops
= {
832 .request
= msmsdcc_request
,
833 .set_ios
= msmsdcc_set_ios
,
834 .enable_sdio_irq
= msmsdcc_enable_sdio_irq
,
838 msmsdcc_check_status(unsigned long data
)
840 struct msmsdcc_host
*host
= (struct msmsdcc_host
*)data
;
843 if (!host
->plat
->status
) {
844 mmc_detect_change(host
->mmc
, 0);
848 status
= host
->plat
->status(mmc_dev(host
->mmc
));
849 host
->eject
= !status
;
850 if (status
^ host
->oldstat
) {
851 pr_info("%s: Slot status change detected (%d -> %d)\n",
852 mmc_hostname(host
->mmc
), host
->oldstat
, status
);
854 mmc_detect_change(host
->mmc
, (5 * HZ
) / 2);
856 mmc_detect_change(host
->mmc
, 0);
859 host
->oldstat
= status
;
862 if (host
->timer
.function
)
863 mod_timer(&host
->timer
, jiffies
+ HZ
);
867 msmsdcc_platform_status_irq(int irq
, void *dev_id
)
869 struct msmsdcc_host
*host
= dev_id
;
871 printk(KERN_DEBUG
"%s: %d\n", __func__
, irq
);
872 msmsdcc_check_status((unsigned long) host
);
877 msmsdcc_status_notify_cb(int card_present
, void *dev_id
)
879 struct msmsdcc_host
*host
= dev_id
;
881 printk(KERN_DEBUG
"%s: card_present %d\n", mmc_hostname(host
->mmc
),
883 msmsdcc_check_status((unsigned long) host
);
887 * called when a command expires.
888 * Dump some debugging, and then error
889 * out the transaction.
892 msmsdcc_command_expired(unsigned long _data
)
894 struct msmsdcc_host
*host
= (struct msmsdcc_host
*) _data
;
895 struct mmc_request
*mrq
;
898 spin_lock_irqsave(&host
->lock
, flags
);
899 mrq
= host
->curr
.mrq
;
902 pr_info("%s: Command expiry misfire\n",
903 mmc_hostname(host
->mmc
));
904 spin_unlock_irqrestore(&host
->lock
, flags
);
908 pr_err("%s: Command timeout (%p %p %p %p)\n",
909 mmc_hostname(host
->mmc
), mrq
, mrq
->cmd
,
910 mrq
->data
, host
->dma
.sg
);
912 mrq
->cmd
->error
= -ETIMEDOUT
;
913 msmsdcc_stop_data(host
);
915 writel(0, host
->base
+ MMCICOMMAND
);
917 host
->curr
.mrq
= NULL
;
918 host
->curr
.cmd
= NULL
;
920 spin_unlock_irqrestore(&host
->lock
, flags
);
921 mmc_request_done(host
->mmc
, mrq
);
925 msmsdcc_init_dma(struct msmsdcc_host
*host
)
927 memset(&host
->dma
, 0, sizeof(struct msmsdcc_dma_data
));
928 host
->dma
.host
= host
;
929 host
->dma
.channel
= -1;
934 host
->dma
.nc
= dma_alloc_coherent(NULL
,
935 sizeof(struct msmsdcc_nc_dmadata
),
936 &host
->dma
.nc_busaddr
,
938 if (host
->dma
.nc
== NULL
) {
939 pr_err("Unable to allocate DMA buffer\n");
942 memset(host
->dma
.nc
, 0x00, sizeof(struct msmsdcc_nc_dmadata
));
943 host
->dma
.cmd_busaddr
= host
->dma
.nc_busaddr
;
944 host
->dma
.cmdptr_busaddr
= host
->dma
.nc_busaddr
+
945 offsetof(struct msmsdcc_nc_dmadata
, cmdptr
);
946 host
->dma
.channel
= host
->dmares
->start
;
951 #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
953 do_resume_work(struct work_struct
*work
)
955 struct msmsdcc_host
*host
=
956 container_of(work
, struct msmsdcc_host
, resume_task
);
957 struct mmc_host
*mmc
= host
->mmc
;
960 mmc_resume_host(mmc
);
962 enable_irq(host
->stat_irq
);
968 msmsdcc_probe(struct platform_device
*pdev
)
970 struct mmc_platform_data
*plat
= pdev
->dev
.platform_data
;
971 struct msmsdcc_host
*host
;
972 struct mmc_host
*mmc
;
973 struct resource
*cmd_irqres
= NULL
;
974 struct resource
*pio_irqres
= NULL
;
975 struct resource
*stat_irqres
= NULL
;
976 struct resource
*memres
= NULL
;
977 struct resource
*dmares
= NULL
;
980 /* must have platform data */
982 pr_err("%s: Platform data not available\n", __func__
);
987 if (pdev
->id
< 1 || pdev
->id
> 4)
990 if (pdev
->resource
== NULL
|| pdev
->num_resources
< 2) {
991 pr_err("%s: Invalid resource\n", __func__
);
995 memres
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
996 dmares
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
997 cmd_irqres
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
,
999 pio_irqres
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
,
1001 stat_irqres
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
,
1004 if (!cmd_irqres
|| !pio_irqres
|| !memres
) {
1005 pr_err("%s: Invalid resource\n", __func__
);
1010 * Setup our host structure
1013 mmc
= mmc_alloc_host(sizeof(struct msmsdcc_host
), &pdev
->dev
);
1019 host
= mmc_priv(mmc
);
1020 host
->pdev_id
= pdev
->id
;
1026 host
->base
= ioremap(memres
->start
, PAGE_SIZE
);
1032 host
->cmd_irqres
= cmd_irqres
;
1033 host
->pio_irqres
= pio_irqres
;
1034 host
->memres
= memres
;
1035 host
->dmares
= dmares
;
1036 spin_lock_init(&host
->lock
);
1041 msmsdcc_init_dma(host
);
1044 * Setup main peripheral bus clock
1046 host
->pclk
= clk_get(&pdev
->dev
, "sdc_pclk");
1047 if (IS_ERR(host
->pclk
)) {
1048 ret
= PTR_ERR(host
->pclk
);
1052 ret
= clk_enable(host
->pclk
);
1056 host
->pclk_rate
= clk_get_rate(host
->pclk
);
1059 * Setup SDC MMC clock
1061 host
->clk
= clk_get(&pdev
->dev
, "sdc_clk");
1062 if (IS_ERR(host
->clk
)) {
1063 ret
= PTR_ERR(host
->clk
);
1067 ret
= clk_enable(host
->clk
);
1071 ret
= clk_set_rate(host
->clk
, msmsdcc_fmin
);
1073 pr_err("%s: Clock rate set failed (%d)\n", __func__
, ret
);
1077 host
->clk_rate
= clk_get_rate(host
->clk
);
1082 * Setup MMC host structure
1084 mmc
->ops
= &msmsdcc_ops
;
1085 mmc
->f_min
= msmsdcc_fmin
;
1086 mmc
->f_max
= msmsdcc_fmax
;
1087 mmc
->ocr_avail
= plat
->ocr_mask
;
1090 mmc
->caps
|= MMC_CAP_4_BIT_DATA
;
1091 if (msmsdcc_sdioirq
)
1092 mmc
->caps
|= MMC_CAP_SDIO_IRQ
;
1093 mmc
->caps
|= MMC_CAP_MMC_HIGHSPEED
| MMC_CAP_SD_HIGHSPEED
;
1095 mmc
->max_phys_segs
= NR_SG
;
1096 mmc
->max_hw_segs
= NR_SG
;
1097 mmc
->max_blk_size
= 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1098 mmc
->max_blk_count
= 65536;
1100 mmc
->max_req_size
= 33554432; /* MCI_DATA_LENGTH is 25 bits */
1101 mmc
->max_seg_size
= mmc
->max_req_size
;
1103 writel(0, host
->base
+ MMCIMASK0
);
1104 writel(0x5e007ff, host
->base
+ MMCICLEAR
); /* Add: 1 << 25 */
1106 writel(MCI_IRQENABLE
, host
->base
+ MMCIMASK0
);
1107 host
->saved_irq0mask
= MCI_IRQENABLE
;
1110 * Setup card detect change
1113 memset(&host
->timer
, 0, sizeof(host
->timer
));
1115 if (stat_irqres
&& !(stat_irqres
->flags
& IORESOURCE_DISABLED
)) {
1116 unsigned long irqflags
= IRQF_SHARED
|
1117 (stat_irqres
->flags
& IRQF_TRIGGER_MASK
);
1119 host
->stat_irq
= stat_irqres
->start
;
1120 ret
= request_irq(host
->stat_irq
,
1121 msmsdcc_platform_status_irq
,
1123 DRIVER_NAME
" (slot)",
1126 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1127 mmc_hostname(mmc
), host
->stat_irq
, ret
);
1130 } else if (plat
->register_status_notify
) {
1131 plat
->register_status_notify(msmsdcc_status_notify_cb
, host
);
1132 } else if (!plat
->status
)
1133 pr_err("%s: No card detect facilities available\n",
1136 init_timer(&host
->timer
);
1137 host
->timer
.data
= (unsigned long)host
;
1138 host
->timer
.function
= msmsdcc_check_status
;
1139 host
->timer
.expires
= jiffies
+ HZ
;
1140 add_timer(&host
->timer
);
1144 host
->oldstat
= host
->plat
->status(mmc_dev(host
->mmc
));
1145 host
->eject
= !host
->oldstat
;
1149 * Setup a command timer. We currently need this due to
1150 * some 'strange' timeout / error handling situations.
1152 init_timer(&host
->command_timer
);
1153 host
->command_timer
.data
= (unsigned long) host
;
1154 host
->command_timer
.function
= msmsdcc_command_expired
;
1156 ret
= request_irq(cmd_irqres
->start
, msmsdcc_irq
, IRQF_SHARED
,
1157 DRIVER_NAME
" (cmd)", host
);
1161 ret
= request_irq(pio_irqres
->start
, msmsdcc_pio_irq
, IRQF_SHARED
,
1162 DRIVER_NAME
" (pio)", host
);
1166 mmc_set_drvdata(pdev
, mmc
);
1169 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1170 mmc_hostname(mmc
), (unsigned long long)memres
->start
,
1171 (unsigned int) cmd_irqres
->start
,
1172 (unsigned int) host
->stat_irq
, host
->dma
.channel
);
1173 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc
),
1174 (mmc
->caps
& MMC_CAP_4_BIT_DATA
? "enabled" : "disabled"));
1175 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1176 mmc_hostname(mmc
), msmsdcc_fmin
, msmsdcc_fmax
, host
->pclk_rate
);
1177 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc
), host
->eject
);
1178 pr_info("%s: Power save feature enable = %d\n",
1179 mmc_hostname(mmc
), msmsdcc_pwrsave
);
1181 if (host
->dma
.channel
!= -1) {
1182 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1183 mmc_hostname(mmc
), host
->dma
.nc
, host
->dma
.nc_busaddr
);
1184 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1185 mmc_hostname(mmc
), host
->dma
.cmd_busaddr
,
1186 host
->dma
.cmdptr_busaddr
);
1188 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc
));
1189 if (host
->timer
.function
)
1190 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc
));
1194 free_irq(cmd_irqres
->start
, host
);
1197 free_irq(host
->stat_irq
, host
);
1199 clk_disable(host
->clk
);
1203 clk_disable(host
->pclk
);
1205 clk_put(host
->pclk
);
1213 msmsdcc_suspend(struct platform_device
*dev
, pm_message_t state
)
1215 struct mmc_host
*mmc
= mmc_get_drvdata(dev
);
1219 struct msmsdcc_host
*host
= mmc_priv(mmc
);
1222 disable_irq(host
->stat_irq
);
1224 if (mmc
->card
&& mmc
->card
->type
!= MMC_TYPE_SDIO
)
1225 rc
= mmc_suspend_host(mmc
, state
);
1227 writel(0, host
->base
+ MMCIMASK0
);
1229 if (host
->clks_on
) {
1230 clk_disable(host
->clk
);
1231 clk_disable(host
->pclk
);
1240 msmsdcc_resume(struct platform_device
*dev
)
1242 struct mmc_host
*mmc
= mmc_get_drvdata(dev
);
1243 unsigned long flags
;
1246 struct msmsdcc_host
*host
= mmc_priv(mmc
);
1248 spin_lock_irqsave(&host
->lock
, flags
);
1250 if (!host
->clks_on
) {
1251 clk_enable(host
->pclk
);
1252 clk_enable(host
->clk
);
1256 writel(host
->saved_irq0mask
, host
->base
+ MMCIMASK0
);
1258 spin_unlock_irqrestore(&host
->lock
, flags
);
1260 if (mmc
->card
&& mmc
->card
->type
!= MMC_TYPE_SDIO
)
1261 mmc_resume_host(mmc
);
1263 enable_irq(host
->stat_irq
);
1264 else if (host
->stat_irq
)
1265 enable_irq(host
->stat_irq
);
1270 static struct platform_driver msmsdcc_driver
= {
1271 .probe
= msmsdcc_probe
,
1272 .suspend
= msmsdcc_suspend
,
1273 .resume
= msmsdcc_resume
,
1279 static int __init
msmsdcc_init(void)
1281 return platform_driver_register(&msmsdcc_driver
);
1284 static void __exit
msmsdcc_exit(void)
1286 platform_driver_unregister(&msmsdcc_driver
);
1289 module_init(msmsdcc_init
);
1290 module_exit(msmsdcc_exit
);
1292 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1293 MODULE_LICENSE("GPL");