/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/device.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/highmem.h>
20 #include <linux/log2.h>
21 #include <linux/mmc/host.h>
22 #include <linux/amba/bus.h>
23 #include <linux/clk.h>
24 #include <linux/scatterlist.h>
25 #include <linux/gpio.h>
26 #include <linux/amba/mmci.h>
27 #include <linux/regulator/consumer.h>
29 #include <asm/cacheflush.h>
30 #include <asm/div64.h>
32 #include <asm/sizes.h>
36 #define DRIVER_NAME "mmci-pl18x"
/*
 * Maximum card clock frequency (Hz) this driver will program.
 * Overridable at load time via the "fmax" module parameter (0444,
 * read-only in sysfs; see module_param() at the bottom of the file).
 */
static unsigned int fmax = 515633;
41 * This must be called with host->lock held
43 static void mmci_set_clkreg(struct mmci_host
*host
, unsigned int desired
)
48 if (desired
>= host
->mclk
) {
50 host
->cclk
= host
->mclk
;
52 clk
= host
->mclk
/ (2 * desired
) - 1;
55 host
->cclk
= host
->mclk
/ (2 * (clk
+ 1));
57 if (host
->hw_designer
== AMBA_VENDOR_ST
)
58 clk
|= MCI_FCEN
; /* Bug fix in ST IP block */
59 clk
|= MCI_CLK_ENABLE
;
60 /* This hasn't proven to be worthwhile */
61 /* clk |= MCI_CLK_PWRSAVE; */
64 if (host
->mmc
->ios
.bus_width
== MMC_BUS_WIDTH_4
)
67 writel(clk
, host
->base
+ MMCICLOCK
);
71 mmci_request_end(struct mmci_host
*host
, struct mmc_request
*mrq
)
73 writel(0, host
->base
+ MMCICOMMAND
);
81 mrq
->data
->bytes_xfered
= host
->data_xfered
;
84 * Need to drop the host lock here; mmc_request_done may call
85 * back into the driver...
87 spin_unlock(&host
->lock
);
88 mmc_request_done(host
->mmc
, mrq
);
89 spin_lock(&host
->lock
);
92 static void mmci_stop_data(struct mmci_host
*host
)
94 writel(0, host
->base
+ MMCIDATACTRL
);
95 writel(0, host
->base
+ MMCIMASK1
);
99 static void mmci_start_data(struct mmci_host
*host
, struct mmc_data
*data
)
101 unsigned int datactrl
, timeout
, irqmask
;
102 unsigned long long clks
;
106 dev_dbg(mmc_dev(host
->mmc
), "blksz %04x blks %04x flags %08x\n",
107 data
->blksz
, data
->blocks
, data
->flags
);
110 host
->size
= data
->blksz
;
111 host
->data_xfered
= 0;
113 mmci_init_sg(host
, data
);
115 clks
= (unsigned long long)data
->timeout_ns
* host
->cclk
;
116 do_div(clks
, 1000000000UL);
118 timeout
= data
->timeout_clks
+ (unsigned int)clks
;
121 writel(timeout
, base
+ MMCIDATATIMER
);
122 writel(host
->size
, base
+ MMCIDATALENGTH
);
124 blksz_bits
= ffs(data
->blksz
) - 1;
125 BUG_ON(1 << blksz_bits
!= data
->blksz
);
127 datactrl
= MCI_DPSM_ENABLE
| blksz_bits
<< 4;
128 if (data
->flags
& MMC_DATA_READ
) {
129 datactrl
|= MCI_DPSM_DIRECTION
;
130 irqmask
= MCI_RXFIFOHALFFULLMASK
;
133 * If we have less than a FIFOSIZE of bytes to transfer,
134 * trigger a PIO interrupt as soon as any data is available.
136 if (host
->size
< MCI_FIFOSIZE
)
137 irqmask
|= MCI_RXDATAAVLBLMASK
;
140 * We don't actually need to include "FIFO empty" here
141 * since its implicit in "FIFO half empty".
143 irqmask
= MCI_TXFIFOHALFEMPTYMASK
;
146 writel(datactrl
, base
+ MMCIDATACTRL
);
147 writel(readl(base
+ MMCIMASK0
) & ~MCI_DATAENDMASK
, base
+ MMCIMASK0
);
148 writel(irqmask
, base
+ MMCIMASK1
);
152 mmci_start_command(struct mmci_host
*host
, struct mmc_command
*cmd
, u32 c
)
154 void __iomem
*base
= host
->base
;
156 dev_dbg(mmc_dev(host
->mmc
), "op %02x arg %08x flags %08x\n",
157 cmd
->opcode
, cmd
->arg
, cmd
->flags
);
159 if (readl(base
+ MMCICOMMAND
) & MCI_CPSM_ENABLE
) {
160 writel(0, base
+ MMCICOMMAND
);
164 c
|= cmd
->opcode
| MCI_CPSM_ENABLE
;
165 if (cmd
->flags
& MMC_RSP_PRESENT
) {
166 if (cmd
->flags
& MMC_RSP_136
)
167 c
|= MCI_CPSM_LONGRSP
;
168 c
|= MCI_CPSM_RESPONSE
;
171 c
|= MCI_CPSM_INTERRUPT
;
175 writel(cmd
->arg
, base
+ MMCIARGUMENT
);
176 writel(c
, base
+ MMCICOMMAND
);
180 mmci_data_irq(struct mmci_host
*host
, struct mmc_data
*data
,
183 if (status
& MCI_DATABLOCKEND
) {
184 host
->data_xfered
+= data
->blksz
;
185 #ifdef CONFIG_ARCH_U300
187 * On the U300 some signal or other is
188 * badly routed so that a data write does
189 * not properly terminate with a MCI_DATAEND
190 * status flag. This quirk will make writes
193 if (data
->flags
& MMC_DATA_WRITE
)
194 status
|= MCI_DATAEND
;
197 if (status
& (MCI_DATACRCFAIL
|MCI_DATATIMEOUT
|MCI_TXUNDERRUN
|MCI_RXOVERRUN
)) {
198 dev_dbg(mmc_dev(host
->mmc
), "MCI ERROR IRQ (status %08x)\n", status
);
199 if (status
& MCI_DATACRCFAIL
)
200 data
->error
= -EILSEQ
;
201 else if (status
& MCI_DATATIMEOUT
)
202 data
->error
= -ETIMEDOUT
;
203 else if (status
& (MCI_TXUNDERRUN
|MCI_RXOVERRUN
))
205 status
|= MCI_DATAEND
;
208 * We hit an error condition. Ensure that any data
209 * partially written to a page is properly coherent.
211 if (host
->sg_len
&& data
->flags
& MMC_DATA_READ
)
212 flush_dcache_page(sg_page(host
->sg_ptr
));
214 if (status
& MCI_DATAEND
) {
215 mmci_stop_data(host
);
218 mmci_request_end(host
, data
->mrq
);
220 mmci_start_command(host
, data
->stop
, 0);
226 mmci_cmd_irq(struct mmci_host
*host
, struct mmc_command
*cmd
,
229 void __iomem
*base
= host
->base
;
233 cmd
->resp
[0] = readl(base
+ MMCIRESPONSE0
);
234 cmd
->resp
[1] = readl(base
+ MMCIRESPONSE1
);
235 cmd
->resp
[2] = readl(base
+ MMCIRESPONSE2
);
236 cmd
->resp
[3] = readl(base
+ MMCIRESPONSE3
);
238 if (status
& MCI_CMDTIMEOUT
) {
239 cmd
->error
= -ETIMEDOUT
;
240 } else if (status
& MCI_CMDCRCFAIL
&& cmd
->flags
& MMC_RSP_CRC
) {
241 cmd
->error
= -EILSEQ
;
244 if (!cmd
->data
|| cmd
->error
) {
246 mmci_stop_data(host
);
247 mmci_request_end(host
, cmd
->mrq
);
248 } else if (!(cmd
->data
->flags
& MMC_DATA_READ
)) {
249 mmci_start_data(host
, cmd
->data
);
253 static int mmci_pio_read(struct mmci_host
*host
, char *buffer
, unsigned int remain
)
255 void __iomem
*base
= host
->base
;
258 int host_remain
= host
->size
;
261 int count
= host_remain
- (readl(base
+ MMCIFIFOCNT
) << 2);
269 readsl(base
+ MMCIFIFO
, ptr
, count
>> 2);
273 host_remain
-= count
;
278 status
= readl(base
+ MMCISTATUS
);
279 } while (status
& MCI_RXDATAAVLBL
);
284 static int mmci_pio_write(struct mmci_host
*host
, char *buffer
, unsigned int remain
, u32 status
)
286 void __iomem
*base
= host
->base
;
290 unsigned int count
, maxcnt
;
292 maxcnt
= status
& MCI_TXFIFOEMPTY
? MCI_FIFOSIZE
: MCI_FIFOHALFSIZE
;
293 count
= min(remain
, maxcnt
);
295 writesl(base
+ MMCIFIFO
, ptr
, count
>> 2);
303 status
= readl(base
+ MMCISTATUS
);
304 } while (status
& MCI_TXFIFOHALFEMPTY
);
310 * PIO data transfer IRQ handler.
312 static irqreturn_t
mmci_pio_irq(int irq
, void *dev_id
)
314 struct mmci_host
*host
= dev_id
;
315 void __iomem
*base
= host
->base
;
318 status
= readl(base
+ MMCISTATUS
);
320 dev_dbg(mmc_dev(host
->mmc
), "irq1 (pio) %08x\n", status
);
324 unsigned int remain
, len
;
328 * For write, we only need to test the half-empty flag
329 * here - if the FIFO is completely empty, then by
330 * definition it is more than half empty.
332 * For read, check for data available.
334 if (!(status
& (MCI_TXFIFOHALFEMPTY
|MCI_RXDATAAVLBL
)))
338 * Map the current scatter buffer.
340 buffer
= mmci_kmap_atomic(host
, &flags
) + host
->sg_off
;
341 remain
= host
->sg_ptr
->length
- host
->sg_off
;
344 if (status
& MCI_RXACTIVE
)
345 len
= mmci_pio_read(host
, buffer
, remain
);
346 if (status
& MCI_TXACTIVE
)
347 len
= mmci_pio_write(host
, buffer
, remain
, status
);
352 mmci_kunmap_atomic(host
, buffer
, &flags
);
362 * If we were reading, and we have completed this
363 * page, ensure that the data cache is coherent.
365 if (status
& MCI_RXACTIVE
)
366 flush_dcache_page(sg_page(host
->sg_ptr
));
368 if (!mmci_next_sg(host
))
371 status
= readl(base
+ MMCISTATUS
);
375 * If we're nearing the end of the read, switch to
376 * "any data available" mode.
378 if (status
& MCI_RXACTIVE
&& host
->size
< MCI_FIFOSIZE
)
379 writel(MCI_RXDATAAVLBLMASK
, base
+ MMCIMASK1
);
382 * If we run out of data, disable the data IRQs; this
383 * prevents a race where the FIFO becomes empty before
384 * the chip itself has disabled the data path, and
385 * stops us racing with our data end IRQ.
387 if (host
->size
== 0) {
388 writel(0, base
+ MMCIMASK1
);
389 writel(readl(base
+ MMCIMASK0
) | MCI_DATAENDMASK
, base
+ MMCIMASK0
);
396 * Handle completion of command and data transfers.
398 static irqreturn_t
mmci_irq(int irq
, void *dev_id
)
400 struct mmci_host
*host
= dev_id
;
404 spin_lock(&host
->lock
);
407 struct mmc_command
*cmd
;
408 struct mmc_data
*data
;
410 status
= readl(host
->base
+ MMCISTATUS
);
411 status
&= readl(host
->base
+ MMCIMASK0
);
412 writel(status
, host
->base
+ MMCICLEAR
);
414 dev_dbg(mmc_dev(host
->mmc
), "irq0 (data+cmd) %08x\n", status
);
417 if (status
& (MCI_DATACRCFAIL
|MCI_DATATIMEOUT
|MCI_TXUNDERRUN
|
418 MCI_RXOVERRUN
|MCI_DATAEND
|MCI_DATABLOCKEND
) && data
)
419 mmci_data_irq(host
, data
, status
);
422 if (status
& (MCI_CMDCRCFAIL
|MCI_CMDTIMEOUT
|MCI_CMDSENT
|MCI_CMDRESPEND
) && cmd
)
423 mmci_cmd_irq(host
, cmd
, status
);
428 spin_unlock(&host
->lock
);
430 return IRQ_RETVAL(ret
);
433 static void mmci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
435 struct mmci_host
*host
= mmc_priv(mmc
);
438 WARN_ON(host
->mrq
!= NULL
);
440 if (mrq
->data
&& !is_power_of_2(mrq
->data
->blksz
)) {
441 dev_err(mmc_dev(mmc
), "unsupported block size (%d bytes)\n",
443 mrq
->cmd
->error
= -EINVAL
;
444 mmc_request_done(mmc
, mrq
);
448 spin_lock_irqsave(&host
->lock
, flags
);
452 if (mrq
->data
&& mrq
->data
->flags
& MMC_DATA_READ
)
453 mmci_start_data(host
, mrq
->data
);
455 mmci_start_command(host
, mrq
->cmd
, 0);
457 spin_unlock_irqrestore(&host
->lock
, flags
);
460 static void mmci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
462 struct mmci_host
*host
= mmc_priv(mmc
);
466 switch (ios
->power_mode
) {
469 regulator_is_enabled(host
->vcc
))
470 regulator_disable(host
->vcc
);
473 #ifdef CONFIG_REGULATOR
475 /* This implicitly enables the regulator */
476 mmc_regulator_set_ocr(host
->vcc
, ios
->vdd
);
479 * The translate_vdd function is not used if you have
480 * an external regulator, or your design is really weird.
481 * Using it would mean sending in power control BOTH using
482 * a regulator AND the 4 MMCIPWR bits. If we don't have
483 * a regulator, we might have some other platform specific
484 * power control behind this translate function.
486 if (!host
->vcc
&& host
->plat
->translate_vdd
)
487 pwr
|= host
->plat
->translate_vdd(mmc_dev(mmc
), ios
->vdd
);
488 /* The ST version does not have this, fall through to POWER_ON */
489 if (host
->hw_designer
!= AMBA_VENDOR_ST
) {
498 if (ios
->bus_mode
== MMC_BUSMODE_OPENDRAIN
) {
499 if (host
->hw_designer
!= AMBA_VENDOR_ST
)
503 * The ST Micro variant use the ROD bit for something
504 * else and only has OD (Open Drain).
510 spin_lock_irqsave(&host
->lock
, flags
);
512 mmci_set_clkreg(host
, ios
->clock
);
514 if (host
->pwr
!= pwr
) {
516 writel(pwr
, host
->base
+ MMCIPOWER
);
519 spin_unlock_irqrestore(&host
->lock
, flags
);
522 static int mmci_get_ro(struct mmc_host
*mmc
)
524 struct mmci_host
*host
= mmc_priv(mmc
);
526 if (host
->gpio_wp
== -ENOSYS
)
529 return gpio_get_value(host
->gpio_wp
);
532 static int mmci_get_cd(struct mmc_host
*mmc
)
534 struct mmci_host
*host
= mmc_priv(mmc
);
537 if (host
->gpio_cd
== -ENOSYS
)
538 status
= host
->plat
->status(mmc_dev(host
->mmc
));
540 status
= gpio_get_value(host
->gpio_cd
);
545 static const struct mmc_host_ops mmci_ops
= {
546 .request
= mmci_request
,
547 .set_ios
= mmci_set_ios
,
548 .get_ro
= mmci_get_ro
,
549 .get_cd
= mmci_get_cd
,
552 static void mmci_check_status(unsigned long data
)
554 struct mmci_host
*host
= (struct mmci_host
*)data
;
555 unsigned int status
= mmci_get_cd(host
->mmc
);
557 if (status
^ host
->oldstat
)
558 mmc_detect_change(host
->mmc
, 0);
560 host
->oldstat
= status
;
561 mod_timer(&host
->timer
, jiffies
+ HZ
);
564 static int __devinit
mmci_probe(struct amba_device
*dev
, struct amba_id
*id
)
566 struct mmci_platform_data
*plat
= dev
->dev
.platform_data
;
567 struct mmci_host
*host
;
568 struct mmc_host
*mmc
;
571 /* must have platform data */
577 ret
= amba_request_regions(dev
, DRIVER_NAME
);
581 mmc
= mmc_alloc_host(sizeof(struct mmci_host
), &dev
->dev
);
587 host
= mmc_priv(mmc
);
590 host
->gpio_wp
= -ENOSYS
;
591 host
->gpio_cd
= -ENOSYS
;
593 host
->hw_designer
= amba_manf(dev
);
594 host
->hw_revision
= amba_rev(dev
);
595 dev_dbg(mmc_dev(mmc
), "designer ID = 0x%02x\n", host
->hw_designer
);
596 dev_dbg(mmc_dev(mmc
), "revision = 0x%01x\n", host
->hw_revision
);
598 host
->clk
= clk_get(&dev
->dev
, NULL
);
599 if (IS_ERR(host
->clk
)) {
600 ret
= PTR_ERR(host
->clk
);
605 ret
= clk_enable(host
->clk
);
610 host
->mclk
= clk_get_rate(host
->clk
);
612 * According to the spec, mclk is max 100 MHz,
613 * so we try to adjust the clock down to this,
616 if (host
->mclk
> 100000000) {
617 ret
= clk_set_rate(host
->clk
, 100000000);
620 host
->mclk
= clk_get_rate(host
->clk
);
621 dev_dbg(mmc_dev(mmc
), "eventual mclk rate: %u Hz\n",
624 host
->base
= ioremap(dev
->res
.start
, resource_size(&dev
->res
));
630 mmc
->ops
= &mmci_ops
;
631 mmc
->f_min
= (host
->mclk
+ 511) / 512;
632 mmc
->f_max
= min(host
->mclk
, fmax
);
633 dev_dbg(mmc_dev(mmc
), "clocking block at %u Hz\n", mmc
->f_max
);
635 #ifdef CONFIG_REGULATOR
636 /* If we're using the regulator framework, try to fetch a regulator */
637 host
->vcc
= regulator_get(&dev
->dev
, "vmmc");
638 if (IS_ERR(host
->vcc
))
641 int mask
= mmc_regulator_get_ocrmask(host
->vcc
);
644 dev_err(&dev
->dev
, "error getting OCR mask (%d)\n",
647 host
->mmc
->ocr_avail
= (u32
) mask
;
650 "Provided ocr_mask/setpower will not be used "
651 "(using regulator instead)\n");
655 /* Fall back to platform data if no regulator is found */
656 if (host
->vcc
== NULL
)
657 mmc
->ocr_avail
= plat
->ocr_mask
;
658 mmc
->caps
= plat
->capabilities
;
663 mmc
->max_hw_segs
= 16;
664 mmc
->max_phys_segs
= NR_SG
;
667 * Since we only have a 16-bit data length register, we must
668 * ensure that we don't exceed 2^16-1 bytes in a single request.
670 mmc
->max_req_size
= 65535;
673 * Set the maximum segment size. Since we aren't doing DMA
674 * (yet) we are only limited by the data length register.
676 mmc
->max_seg_size
= mmc
->max_req_size
;
679 * Block size can be up to 2048 bytes, but must be a power of two.
681 mmc
->max_blk_size
= 2048;
684 * No limit on the number of blocks transferred.
686 mmc
->max_blk_count
= mmc
->max_req_size
;
688 spin_lock_init(&host
->lock
);
690 writel(0, host
->base
+ MMCIMASK0
);
691 writel(0, host
->base
+ MMCIMASK1
);
692 writel(0xfff, host
->base
+ MMCICLEAR
);
694 if (gpio_is_valid(plat
->gpio_cd
)) {
695 ret
= gpio_request(plat
->gpio_cd
, DRIVER_NAME
" (cd)");
697 ret
= gpio_direction_input(plat
->gpio_cd
);
699 host
->gpio_cd
= plat
->gpio_cd
;
700 else if (ret
!= -ENOSYS
)
703 if (gpio_is_valid(plat
->gpio_wp
)) {
704 ret
= gpio_request(plat
->gpio_wp
, DRIVER_NAME
" (wp)");
706 ret
= gpio_direction_input(plat
->gpio_wp
);
708 host
->gpio_wp
= plat
->gpio_wp
;
709 else if (ret
!= -ENOSYS
)
713 ret
= request_irq(dev
->irq
[0], mmci_irq
, IRQF_SHARED
, DRIVER_NAME
" (cmd)", host
);
717 ret
= request_irq(dev
->irq
[1], mmci_pio_irq
, IRQF_SHARED
, DRIVER_NAME
" (pio)", host
);
721 writel(MCI_IRQENABLE
, host
->base
+ MMCIMASK0
);
723 amba_set_drvdata(dev
, mmc
);
724 host
->oldstat
= mmci_get_cd(host
->mmc
);
728 dev_info(&dev
->dev
, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
729 mmc_hostname(mmc
), amba_rev(dev
), amba_config(dev
),
730 (unsigned long long)dev
->res
.start
, dev
->irq
[0], dev
->irq
[1]);
732 init_timer(&host
->timer
);
733 host
->timer
.data
= (unsigned long)host
;
734 host
->timer
.function
= mmci_check_status
;
735 host
->timer
.expires
= jiffies
+ HZ
;
736 add_timer(&host
->timer
);
741 free_irq(dev
->irq
[0], host
);
743 if (host
->gpio_wp
!= -ENOSYS
)
744 gpio_free(host
->gpio_wp
);
746 if (host
->gpio_cd
!= -ENOSYS
)
747 gpio_free(host
->gpio_cd
);
751 clk_disable(host
->clk
);
757 amba_release_regions(dev
);
762 static int __devexit
mmci_remove(struct amba_device
*dev
)
764 struct mmc_host
*mmc
= amba_get_drvdata(dev
);
766 amba_set_drvdata(dev
, NULL
);
769 struct mmci_host
*host
= mmc_priv(mmc
);
771 del_timer_sync(&host
->timer
);
773 mmc_remove_host(mmc
);
775 writel(0, host
->base
+ MMCIMASK0
);
776 writel(0, host
->base
+ MMCIMASK1
);
778 writel(0, host
->base
+ MMCICOMMAND
);
779 writel(0, host
->base
+ MMCIDATACTRL
);
781 free_irq(dev
->irq
[0], host
);
782 free_irq(dev
->irq
[1], host
);
784 if (host
->gpio_wp
!= -ENOSYS
)
785 gpio_free(host
->gpio_wp
);
786 if (host
->gpio_cd
!= -ENOSYS
)
787 gpio_free(host
->gpio_cd
);
790 clk_disable(host
->clk
);
793 if (regulator_is_enabled(host
->vcc
))
794 regulator_disable(host
->vcc
);
795 regulator_put(host
->vcc
);
799 amba_release_regions(dev
);
#ifdef CONFIG_PM
/*
 * Suspend: let the MMC core quiesce the card, then mask all MMCI
 * interrupts so nothing fires while suspended.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

/*
 * Resume: re-enable the standard interrupt set and let the MMC core
 * re-initialise the card.
 */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
842 static struct amba_id mmci_ids
[] = {
851 /* ST Micro variants */
863 static struct amba_driver mmci_driver
= {
868 .remove
= __devexit_p(mmci_remove
),
869 .suspend
= mmci_suspend
,
870 .resume
= mmci_resume
,
871 .id_table
= mmci_ids
,
874 static int __init
mmci_init(void)
876 return amba_driver_register(&mmci_driver
);
879 static void __exit
mmci_exit(void)
881 amba_driver_unregister(&mmci_driver
);
884 module_init(mmci_init
);
885 module_exit(mmci_exit
);
886 module_param(fmax
, uint
, 0444);
888 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
889 MODULE_LICENSE("GPL");