/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"
#define DRIVER_NAME "mmci-pl18x"

#define DBG(host, fmt, args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

static unsigned int fmax = 515633;
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
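/*
 * Illustrative numbers for the programming above (not taken from any
 * particular board): with cclk = 25 MHz and data->timeout_ns = 100 ms,
 * clks = 100000000 * 25000000 / 1000000000 = 2500000 card clock cycles,
 * to which data->timeout_clks is added before being written to
 * MMCIDATATIMER.  A 512-byte block gives blksz_bits = ffs(512) - 1 = 9,
 * which is what ends up in the block size field of MMCIDATACTRL via the
 * "blksz_bits << 4" above.
 */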
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
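/*
 * Example command word, assuming the MCI_CPSM_* bit layout from mmci.h
 * (ENABLE = bit 10, RESPONSE = bit 6, LONGRSP = bit 7): a READ_SINGLE_BLOCK
 * (CMD17) with a short R1 response is programmed as
 * 17 | MCI_CPSM_RESPONSE | MCI_CPSM_ENABLE = 0x451.
 */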
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
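/*
 * The count calculation above relies on MMCIFIFOCNT holding the number of
 * words still to be transferred for the current data stream, so
 * host->size - 4 * FIFOCNT is (roughly) the number of bytes already sitting
 * in the receive FIFO and safe to pull out in one readsl() burst.
 */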
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
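/*
 * maxcnt above is sized from the FIFO status: if the FIFO is completely
 * empty a full FIFO's worth of data can be pushed in one go, otherwise only
 * half of it is guaranteed to be free.  Assuming the 16-word FIFO defined
 * by MCI_FIFOSIZE/MCI_FIFOHALFSIZE in mmci.h, that is 64 versus 32 bytes
 * per iteration.
 */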
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
		       mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
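/*
 * Note the read/write asymmetry above: for reads the data path is set up
 * before the command is issued, so the FIFO and interrupt masks are ready
 * the moment the card starts returning data; for writes the data path is
 * started from mmci_cmd_irq() only once the command has completed without
 * error.
 */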
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == 0x80)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != 0x80) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != 0x80)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}
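/*
 * Divider example with illustrative numbers (mclk depends on the platform
 * clock tree): for mclk = 48 MHz and a requested ios->clock of 400 kHz,
 * clk = 48000000 / (2 * 400000) - 1 = 59 and
 * cclk = 48000000 / (2 * (59 + 1)) = 400 kHz.  Requests at or above mclk
 * simply select the bypass bit and run the card bus at mclk.
 */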
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}
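/*
 * This timer callback polls the platform's card-detect status callback once
 * a second (jiffies + HZ) and kicks mmc_detect_change() whenever the value
 * changes; no dedicated card-detect interrupt is wired up in this driver.
 */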
static int __devinit mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	/* Bits 12 thru 19 are the designer */
	host->hw_designer = (dev->periphid >> 12) & 0xff;
	/* Bits 20 thru 23 are the revision */
	host->hw_revision = (dev->periphid >> 20) & 0xf;
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}

	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
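	/*
	 * f_min above follows from the 8-bit clock divider programmed in
	 * mmci_set_ios(): the largest division ratio is 2 * (255 + 1) = 512,
	 * so the slowest achievable card clock is mclk / 512 (rounded up).
	 */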
	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
	},
	{ 0, 0 },
};
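/*
 * The AMBA peripheral ID is matched against these entries under the given
 * masks: 0x000fffff covers the part number and designer fields, while the
 * wider 0x00ffffff used for the ST Micro variants also pins down the
 * revision nibble (the same bit positions decoded into hw_designer and
 * hw_revision in mmci_probe()).
 */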
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");