/*
 *  linux/drivers/mmc/pxa.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *
 *  1 and 3 byte data transfers not supported
 *  max block length up to 1023
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>

#include <asm/arch/pxa-regs.h>
#include <asm/arch/mmc.h>

#include "pxamci.h"
#ifdef CONFIG_MMC_DEBUG
#define DBG(x...)	printk(KERN_DEBUG x)
#else
#define DBG(x...)	do { } while (0)
#endif
#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG	1
struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	int			irq;
	int			dma;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;
	struct pxa_dma_desc	*sg_cpu;
	unsigned int		dma_len;

	unsigned int		dma_dir;
};
static inline unsigned int ns_to_clocks(unsigned int ns)
{
	return (ns * (CLOCKRATE / 1000000) + 999) / 1000;
}
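/*
 * Example (illustrative only -- CLOCKRATE comes from pxamci.h and the exact
 * value depends on the SoC): with a 20MHz controller clock,
 * ns_to_clocks(80) = (80 * 20 + 999) / 1000 = 2 clocks.  The +999 makes the
 * conversion round up, so a timeout is never shortened by truncation.
 */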
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);

	timeout = ns_to_clocks(data->timeout_ns) + data->timeout_clks;
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);
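	/*
	 * Build one pxa_dma_desc per mapped scatterlist entry in the
	 * coherent sg_cpu buffer: reads drain MMC_RXFIFO into memory,
	 * writes feed MMC_TXFIFO from memory.  Each descriptor's ddadr
	 * points at the next descriptor (sg_dma is the bus address of
	 * sg_cpu), the last one is terminated with DDADR_STOP, and the
	 * channel is then started with DCSR_RUN below.
	 */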
	for (i = 0; i < host->dma_len; i++) {
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
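	/*
	 * RSP_TYPE() strips the BUSY and OPCODE flags so that response types
	 * differing only in those bits (e.g. R1 and R1b) select the same
	 * CMDAT response-format value in the switch below.
	 */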
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	DBG("PXAMCI: request done\n");
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}
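	/*
	 * Illustration: if three consecutive 16-bit FIFO words were 0x3FAA,
	 * 0xBBCC and 0xDDEE, the first word's upper byte (0x3F) falls off
	 * the top of the 32-bit shift and the response word becomes
	 * (0x3FAA << 24) | (0xBBCC << 8) | (0xDDEE >> 8) = 0xAABBCCDD.
	 * Carrying w2 forward as v keeps this byte-shifted packing going
	 * across all four resp[] words.
	 */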
	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 */
		if (cmd->opcode == MMC_ALL_SEND_CID ||
		    cmd->opcode == MMC_SEND_CSD ||
		    cmd->opcode == MMC_SEND_CID) {
			/* a bogus CRC error can appear if the msb of
			   the 15 byte response is a one */
			if ((cmd->resp[0] & 0x80000000) == 0)
				cmd->error = MMC_ERR_BADCRC;
		} else {
			DBG("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		}
#else
		cmd->error = MMC_ERR_BADCRC;
#endif
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && cmd->error == MMC_ERR_NONE) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = MMC_ERR_TIMEOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = MMC_ERR_BADCRC;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (data->error == MMC_ERR_NONE)
		data->bytes_xfered = data->blocks << data->blksz_bits;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop && data->error == MMC_ERR_NONE) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, 0);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG);

	DBG("PXAMCI: irq %08x\n", ireg);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		DBG("PXAMCI: stat %08x\n", stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
	}

	return IRQ_RETVAL(handled);
}
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return host->pdata->get_ro(mmc->dev);
	/* Host doesn't support read only detection so assume writeable */
	return 0;
}
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
	    ios->clock, ios->power_mode, ios->vdd / 100,
	    ios->vdd % 100);
	if (ios->clock) {
		unsigned int clk = CLOCKRATE / ios->clock;
		if (CLOCKRATE / clk > ios->clock)
			clk <<= 1;
		host->clkrt = fls(clk) - 1;
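		/*
		 * Example (with an illustrative 20MHz CLOCKRATE; the real
		 * value comes from pxamci.h): a request for 7MHz gives
		 * clk = 20/7 = 2, but 20MHz/2 = 10MHz would overclock the
		 * card, so clk is doubled to 4 and clkrt = fls(4) - 1 = 2,
		 * i.e. a divide-by-4 setting for an actual 5MHz bus clock.
		 */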
		pxa_set_cken(CKEN12_MMC, 1);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		pxa_set_cken(CKEN12_MMC, 0);
	}
	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc->dev, ios->vdd);

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
	    host->clkrt, host->cmdat);
}
static struct mmc_host_ops pxamci_ops = {
	.request	= pxamci_request,
	.get_ro		= pxamci_get_ro,
	.set_ios	= pxamci_set_ios,
};
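/*
 * The DMA channel interrupt is not used for transfer completion -- that is
 * signalled by the controller's DATA_TRAN_DONE interrupt and handled in
 * pxamci_data_done() -- so pxamci_dma_irq() below only reports an unexpected
 * interrupt and clears the channel's status bits.
 */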
static void pxamci_dma_irq(int dma, void *devid, struct pt_regs *regs)
{
	printk(KERN_ERR "DMA%d: IRQ???\n", dma);
	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid, struct pt_regs *regs)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, host->pdata->detect_delay);
	return IRQ_HANDLED;
}
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;
	mmc->f_min = CLOCKRATE_MIN;
	mmc->f_max = CLOCKRATE_MAX;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;
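	/*
	 * Taken together, these limits mean the MMC layer hands us at most
	 * NR_SG scatterlist segments of at most one page each per request,
	 * which is what the per-entry descriptors built in
	 * pxamci_setup_data() expect.
	 */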
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;

	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);
	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
#ifdef CONFIG_PM
static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define pxamci_suspend	NULL
#define pxamci_resume	NULL
#endif
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");