/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4 bit support), with further 4 bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0

/* Definitions for values the CTL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND    0x00000001
#define TMIO_STAT_DATAEND       0x00000004
#define TMIO_STAT_CARD_REMOVE   0x00000008
#define TMIO_STAT_CARD_INSERT   0x00000010
#define TMIO_STAT_SIGSTATE      0x00000020
#define TMIO_STAT_WRPROTECT     0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A    0x00000400
#define TMIO_STAT_CMD_IDX_ERR   0x00010000
#define TMIO_STAT_CRCFAIL       0x00020000
#define TMIO_STAT_STOPBIT_ERR   0x00040000
#define TMIO_STAT_DATATIMEOUT   0x00080000
#define TMIO_STAT_RXOVERFLOW    0x00100000
#define TMIO_STAT_TXUNDERRUN    0x00200000
#define TMIO_STAT_CMDTIMEOUT    0x00400000
#define TMIO_STAT_RXRDY         0x01000000
#define TMIO_STAT_TXRQ          0x02000000
#define TMIO_STAT_ILL_FUNC      0x20000000
#define TMIO_STAT_CMD_BUSY      0x40000000
#define TMIO_STAT_ILL_ACCESS    0x80000000

/* Definitions for values the CTL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ   0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
#define TMIO_SDIO_STAT_EXWT    0x8000
#define TMIO_SDIO_MASK_ALL     0xc007

/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL     0x837f031d
#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
			   TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)

#define enable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask &= ~((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define disable_mmc_irqs(host, i) \
	do { \
		u32 mask; \
		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
		mask |= ((i) & TMIO_MASK_IRQ); \
		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
	} while (0)

#define ack_mmc_irqs(host, i) \
	do { \
		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
	} while (0)
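
/*
 * Note on ack_mmc_irqs(): judging by its use in this driver (no datasheet
 * was consulted here), the CTL_STATUS bits are write-zero-to-clear, so
 * writing ~(i) acknowledges exactly the bits in i while the remaining 1s
 * leave all other status bits untouched.
 */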

/* This is arbitrary, just no one has needed any higher alignment yet */
#define MAX_ALIGN 4

struct tmio_mmc_host {
	void __iomem		*ctl;
	unsigned long		bus_shift;
	struct mmc_command	*cmd;
	struct mmc_request	*mrq;
	struct mmc_data		*data;
	struct mmc_host		*mmc;
	int			irq;
	unsigned int		sdio_irq_enabled;

	/* Callbacks for clock / power control */
	void (*set_pwr)(struct platform_device *host, int state);
	void (*set_clk_div)(struct platform_device *host, int state);

	/* pio related stuff */
	struct scatterlist	*sg_ptr;
	struct scatterlist	*sg_orig;
	unsigned int		sg_len;
	unsigned int		sg_off;

	struct platform_device	*pdev;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct tasklet_struct	dma_complete;
	struct tasklet_struct	dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
	unsigned int		dma_sglen;
	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
	struct scatterlist	bounce_sg;
#endif

	/* Track lost interrupts */
	struct delayed_work	delayed_reset_work;
	spinlock_t		lock;
	unsigned long		last_req_ts;
};

static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
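
/*
 * Register access helpers. The cell sits on a 16-bit bus; bus_shift spaces
 * the registers out when the cell is mapped with a wider stride (see the
 * bus_shift computation in tmio_mmc_probe below). The 32-bit registers are
 * presumably just two adjacent 16-bit halves, which is why sd_ctrl_read32
 * and sd_ctrl_write32 are composed of two 16-bit accesses each.
 */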

static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
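
/*
 * Worked example for the divider search above (illustrative numbers, not
 * from a datasheet): with pdata->hclk = 24MHz the probe code sets
 * f_min = 24MHz / 512 = 46.875kHz, and clk starts with divider bits 0x80
 * (divide by 512). A request for 400kHz lets the loop shift three times,
 * ending with clock = 375kHz and clk = 0x80000010, i.e. divider bit 0x10
 * (divide by 64: 24MHz / 64 = 375kHz), the fastest setting that does not
 * exceed the requested rate.
 */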

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	/*
	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
	 * device IRQ here and restore the SDIO IRQ mask before
	 * re-enabling the device IRQ.
	 */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* see comment in tmio_mmc_clk_stop above */
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		disable_irq(host->irq);
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
	msleep(10);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
		enable_irq(host->irq);
	}
}

static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set the stop clock register here? */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

/* Runs from a workqueue when a request appears to have been lost */
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	reset(host);

	mmc_request_done(host->mmc, mrq);
}

static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
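
/*
 * Example of how these bits combine (derived from tmio_mmc_start_command
 * below, not from a datasheet): a single-block read, CMD17 with an R1
 * response, is issued as c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ,
 * i.e. 0x1c11.
 */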

static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *	   should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(sg_virt, &flags);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			disable_mmc_irqs(host, TMIO_MASK_READOP);
		else
			tmio_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		if (!host->chan_tx)
			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already arrives with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
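
	/*
	 * Illustration of the fixup below (inferred from this code, not from
	 * a datasheet): the loop reads the raw words highest-address-first
	 * into resp[3..0]; for 136-bit responses each word is then shifted up
	 * by 8 bits and refilled from the top byte of the following word, so
	 * raw words 0x00AABBCC, 0xDDEEFF00, ... become resp[0] = 0xAABBCCDD
	 * and so on, recreating the byte order the MMC layer expects.
	 */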

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
		} else {
			if (!host->chan_tx)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			ack_mmc_irqs(host,
				     TMIO_STAT_CMDRESPEND |
				     TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}

#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
	}
}
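
/*
 * Bounce buffer scheme, as implemented in this file: when the single sg
 * element handed to us is not sufficiently aligned for the DMA controller,
 * the transfer is redirected through bounce_buf instead. For writes the data
 * is copied into the bounce buffer before the DMA is set up (see
 * tmio_mmc_start_dma_tx below); for reads the helper above copies it back
 * into the caller's buffer once the DMA has completed.
 */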

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

static void tmio_dma_complete(void *arg)
{
	struct tmio_mmc_host *host = arg;

	dev_dbg(&host->pdev->dev, "Command completed\n");

	if (!host->data)
		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
	else
		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		cookie = desc->tx_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		} else {
			chan->device->device_issue_pending(chan);
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  align >= MAX_ALIGN)) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		cookie = desc->tx_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = host->chan_tx;

	chan->device->device_issue_pending(chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

/* It might be necessary to make this filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}
#else
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
}

static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the winCE driver */
	udelay(140);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
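
/*
 * Note on the polarity above (inferred from this code rather than a
 * datasheet): TMIO_STAT_WRPROTECT reads as 1 while the card is writable, so
 * the slot is reported read-only only when the bit is clear and the platform
 * has not disabled write-protect sensing via TMIO_MMC_WRPROTECT_DISABLE.
 */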

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct mfd_cell *cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc);

	/* Tell the MFD core it can disable us now */
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->resume) {
		ret = cell->resume(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif

static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = cell->driver_data;
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	disable_mmc_irqs(host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	spin_lock_init(&host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}

static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		cancel_delayed_work_sync(&host->delayed_reset_work);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}

/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};

static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");