/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"
static unsigned int fmax = 515633;
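/*
 * fmax is exposed as a module parameter (see module_param() at the bottom
 * of this file): it supplies the default maximum operating frequency in Hz
 * and is only consulted when the platform data does not provide an f_max
 * of its own.
 */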
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	u32 clk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == AMBA_VENDOR_ST)
			clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
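/*
 * A worked example of the divider math above, with purely illustrative
 * values: for mclk = 48 MHz and a desired rate of 400 kHz,
 * clk = 48000000 / (2 * 400000) - 1 = 59, and so
 * cclk = 48000000 / (2 * 60) = 400 kHz. When the ratio is not exact, the
 * integer division can leave cclk slightly above the requested rate.
 */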
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
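/*
 * Illustration of the timeout conversion above, with assumed values: for a
 * card timeout of data->timeout_ns = 100000000 (100 ms) at cclk = 25 MHz,
 * clks = 100000000 * 25000000 / 1000000000 = 2500000 card clock cycles,
 * to which data->timeout_clks is added before programming MMCIDATATIMER.
 */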
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
#ifdef CONFIG_ARCH_U300
		/*
		 * On the U300 some signal or other is
		 * badly routed so that a data write does
		 * not properly terminate with a MCI_DATAEND
		 * status flag. This quirk will make writes
		 * work again.
		 */
		if (data->flags & MMC_DATA_WRITE)
			status |= MCI_DATAEND;
#endif
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (data->flags & MMC_DATA_READ) {
			struct sg_mapping_iter *sg_miter = &host->sg_miter;
			unsigned long flags;

			local_irq_save(flags);
			if (sg_miter_next(sg_miter)) {
				flush_dcache_page(sg_miter->page);
				sg_miter_stop(sg_miter);
			}
			local_irq_restore(flags);
		}
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
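/*
 * Note on the arithmetic in mmci_pio_read(): MMCIFIFOCNT reports the number
 * of 32-bit words still outstanding for the current transfer, so shifting it
 * left by two converts it to bytes; subtracting that from the bytes left in
 * the transfer gives the number of bytes already sitting in the FIFO and
 * therefore safe to read out.
 */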
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
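/*
 * The maxcnt choice above mirrors the FIFO guarantees: when MCI_TXFIFOEMPTY
 * is set the whole MCI_FIFOSIZE is known to be free, otherwise the
 * half-empty condition only guarantees MCI_FIFOHALFSIZE bytes of room.
 */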
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_miter->page);

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
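/*
 * In the loop above the raw status is masked with MMCIMASK0 and then written
 * back to MMCICLEAR, so only enabled, latched interrupt sources are handled
 * and acknowledged on each pass.
 */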
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

	return !status;
}
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
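/*
 * These callbacks are handed to the MMC core through mmc->ops in
 * mmci_probe() below; the core invokes them for request submission,
 * bus/clock configuration and write-protect/card-detect queries.
 */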
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps |= MMC_CAP_NEEDS_POLL;
	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	return 0;
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc) {
			/* Only disable a regulator that was actually enabled */
			if (regulator_is_enabled(host->vcc))
				regulator_disable(host->vcc);
			regulator_put(host->vcc);
		}

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
static struct amba_id mmci_ids[] = {
	/* ST Micro variants */
};
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");