/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
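
/*
 * Maximum card clock frequency in Hz. This is exported as the read-only
 * "fmax" module parameter (see module_param() at the end of this file)
 * and caps the mmc->f_max value advertised from mmci_probe().
 */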
static unsigned int fmax = 515633;

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	u32 clk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == 0x80)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
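
/*
 * Worked example for the divider above: with mclk = 48 MHz and a desired
 * rate of 400 kHz, clk = 48000000 / (2 * 400000) - 1 = 59, which gives
 * cclk = 48000000 / (2 * (59 + 1)) = 400 kHz exactly. The 48 MHz figure
 * is purely illustrative; the real mclk is whatever clk_get_rate()
 * reports in mmci_probe().
 */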

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/*
	 * Enable the DPSM and the PIO interrupts. MCI_DATAENDMASK is kept
	 * cleared here and is only set again by mmci_pio_irq() once all
	 * data has been transferred.
	 */
	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
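
/*
 * Example of the datactrl encoding above: for the common 512-byte block
 * size, ffs(512) - 1 = 9, so datactrl = MCI_DPSM_ENABLE | (9 << 4), with
 * MCI_DPSM_DIRECTION added for card-to-host (read) transfers.
 */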

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Each completed block adds one block size to the transfer count */
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/*
		 * MMCIFIFOCNT holds the remaining number of words in the
		 * current transfer, so the bytes already sitting in the
		 * FIFO are the difference from our own byte count.
		 */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
		       mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
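
/*
 * Note the ordering in mmci_request(): for reads the data path is set up
 * before the command is issued, so the DPSM is ready when the card starts
 * returning data. For writes, mmci_cmd_irq() starts the data path only
 * once the command response has been received.
 */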

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		/*
		 * The translate_vdd function is not used if you have
		 * an external regulator, or your design is really weird.
		 * Using it would mean sending in power control BOTH using
		 * a regulator AND the 4 MMCIPWR bits. If we don't have
		 * a regulator, we might have some other platform specific
		 * power control behind this translate function.
		 */
		if (!host->vcc && host->plat->translate_vdd)
			pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

	return !status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
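
/*
 * Card insertion and removal are detected by polling: the timer below
 * re-arms itself once a second (jiffies + HZ) and calls mmc_detect_change()
 * whenever the value reported by mmci_get_cd() changes.
 */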

static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status = mmci_get_cd(host->mmc);

	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);
	host->oldstat = mmci_get_cd(host->mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc && regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");