/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/sizes.h>
#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};
static struct variant_data variant_arm = {
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};
static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};
static struct variant_data variant_u300 = {
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
};
static struct variant_data variant_ux500 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
};
static struct variant_data variant_ux500v2 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.blksz_datactrl16	= true,
};
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
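		/*
		 * Worked example (illustrative numbers, not from either TRM):
		 * with mclk = 100 MHz and desired = 400 kHz, the ST divider
		 * gives clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
		 * so cclk = 100000000 / (248 + 2) = 400 kHz; the PL180
		 * divider gives clkdiv = 100000000 / (2 * 400000) - 1 = 124,
		 * so cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
		 */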

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}
static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);

	return 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (err || data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;
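	/*
	 * For example (illustrative numbers): a 100 ms timeout_ns at
	 * cclk = 26 MHz gives clks = 100000000 * 26000000 / 1000000000
	 * = 2600000 card clock cycles, on top of whatever timeout_clks
	 * the core already requested.
	 */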

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);
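	/*
	 * E.g. a 512-byte block gives blksz_bits = ffs(512) - 1 = 9, which
	 * is what the datactrl block-size field expects (2^9 = 512); the
	 * BUG_ON above rejects any block size that is not a power of two.
	 */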

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
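		/*
		 * round_down() reports only whole blocks back to the core:
		 * e.g. (illustrative) success = 1535 with a 512-byte block
		 * size becomes bytes_xfered = 1024, dropping the partial
		 * final block.
		 */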
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
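		/*
		 * MMCIFIFOCNT counts 32-bit words still expected, so (<< 2)
		 * turns it into bytes; subtracting that from the bytes left
		 * in this transfer leaves the bytes already sitting in the
		 * FIFO and ready to be read out.
		 */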

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
				       ~variant->clkreg_enable,
				       host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
				       variant->clkreg_enable,
				       host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32bit write, 7 bytes will be two
		 * 32bit writes, etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
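		/*
		 * For example, count = 7 gives (7 + 3) >> 2 = 2 FIFO words,
		 * i.e. 8 bytes are pushed to carry a 7-byte payload.
		 */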

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
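	/*
	 * E.g. (illustrative) with mclk = 100 MHz this puts f_min at
	 * roughly 389 kHz on the ST variant (f = mclk / (clkdiv + 2))
	 * and roughly 195 kHz on the ARM variant
	 * (f = mclk / (2 * (clkdiv + 1))).
	 */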

	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
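	/*
	 * E.g. 16 datalength bits limit a request to 65535 bytes, while the
	 * ST variants' 24 bits allow up to 16 MiB - 1 per request.
	 */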

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
mmci_remove(struct amba_device
*dev
)
1362 struct mmc_host
*mmc
= amba_get_drvdata(dev
);
1364 amba_set_drvdata(dev
, NULL
);
1367 struct mmci_host
*host
= mmc_priv(mmc
);
1369 mmc_remove_host(mmc
);
1371 writel(0, host
->base
+ MMCIMASK0
);
1372 writel(0, host
->base
+ MMCIMASK1
);
1374 writel(0, host
->base
+ MMCICOMMAND
);
1375 writel(0, host
->base
+ MMCIDATACTRL
);
1377 mmci_dma_release(host
);
1378 free_irq(dev
->irq
[0], host
);
1379 if (!host
->singleirq
)
1380 free_irq(dev
->irq
[1], host
);
1382 if (host
->gpio_wp
!= -ENOSYS
)
1383 gpio_free(host
->gpio_wp
);
1384 if (host
->gpio_cd_irq
>= 0)
1385 free_irq(host
->gpio_cd_irq
, host
);
1386 if (host
->gpio_cd
!= -ENOSYS
)
1387 gpio_free(host
->gpio_cd
);
1389 iounmap(host
->base
);
1390 clk_disable(host
->clk
);
1394 mmc_regulator_set_ocr(mmc
, host
->vcc
, 0);
1395 regulator_put(host
->vcc
);
1399 amba_release_regions(dev
);
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.data	= &variant_arm,
	},
	{
		.data	= &variant_arm_extended_fifo,
	},
	{
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_ux500,
	},
	{
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");