/*
 *  linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
 *
 *  Copyright (C) 2007 Google Inc,
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on mmci.c
 *
 * Author: San Mehat (san@android.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/memory.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sizes.h>

#include <mach/mmc.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>

#include "msm_sdcc.h"
#define DRIVER_NAME "msm-sdcc"

static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
static unsigned int msmsdcc_pwrsave = 1;
static unsigned int msmsdcc_piopoll = 1;
static unsigned int msmsdcc_sdioirq;

#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
		      u32 c);
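/*
 * Terminate the current request: disable the command state machine, clear
 * the per-request bookkeeping and hand the request back to the MMC core.
 * Called with host->lock held; the lock is dropped around mmc_request_done()
 * because the core may call back into the driver.
 */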
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->curr.data);

	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->curr.data_xfered;
	if (mrq->cmd->error == -ETIMEDOUT)
		mdelay(5);

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	host->curr.data = NULL;
	host->curr.got_dataend = host->curr.got_datablkend = 0;
}
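/* Physical address of the controller FIFO, used as the DMA box source/destination. */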
uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
	switch (host->pdev_id) {
	case 1:
		return MSM_SDC1_PHYS + MMCIFIFO;
	case 2:
		return MSM_SDC2_PHYS + MMCIFIFO;
	case 3:
		return MSM_SDC3_PHYS + MMCIFIFO;
	case 4:
		return MSM_SDC4_PHYS + MMCIFIFO;
	}
	BUG();
	return 0;
}
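/*
 * DataMover completion callback. Runs once the DMA command finishes
 * (successfully, with an error, or after a flush); unmaps the scatterlist,
 * records how much data was transferred and, if DATAEND/DATABLKEND have
 * already been seen, completes the request from here.
 */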
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data *dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++)
			flush_dcache_page(sg_page(sg++));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
	     || mrq->data->error) {

		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}
static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	if (host->dma.channel == -1)
		return -ENOENT;

	if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
		return -EINVAL;
	if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
		return -EINVAL;
	return 0;
}
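/*
 * Build the DataMover box command list for this request: one box per
 * scatterlist entry, each moving FIFO-sized rows between memory and the
 * SDCC FIFO, tagged with the controller's CRCI for flow control.
 */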
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	struct msmsdcc_nc_dmadata *nc;
	dmov_box *box;
	uint32_t rows;
	uint32_t crci;
	unsigned int n;
	int i, rc;
	struct scatterlist *sg = data->sg;

	rc = validate_dma(host, data);
	if (rc)
		return rc;

	host->dma.sg = data->sg;
	host->dma.num_ents = data->sg_len;

	nc = host->dma.nc;

	switch (host->pdev_id) {
	case 1:
		crci = MSMSDCC_CRCI_SDC1;
		break;
	case 2:
		crci = MSMSDCC_CRCI_SDC2;
		break;
	case 3:
		crci = MSMSDCC_CRCI_SDC3;
		break;
	case 4:
		crci = MSMSDCC_CRCI_SDC4;
		break;
	default:
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOENT;
	}

	if (data->flags & MMC_DATA_READ)
		host->dma.dir = DMA_FROM_DEVICE;
	else
		host->dma.dir = DMA_TO_DEVICE;

	host->curr.user_pages = 0;

	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
		       host->dma.num_ents, host->dma.dir);

	if (n != host->dma.num_ents) {
		pr_err("%s: Unable to map in all sg elements\n",
		       mmc_hostname(host->mmc));
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOMEM;
	}

	box = &nc->cmd[0];
	for (i = 0; i < host->dma.num_ents; i++) {
		box->cmd = CMD_MODE_BOX;

		if (i == (host->dma.num_ents - 1))
			box->cmd |= CMD_LC;
		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
			(sg_dma_len(sg) / MCI_FIFOSIZE);

		if (data->flags & MMC_DATA_READ) {
			box->src_row_addr = msmsdcc_fifo_addr(host);
			box->dst_row_addr = sg_dma_address(sg);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = MCI_FIFOSIZE;

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_SRC_CRCI(crci);
		} else {
			box->src_row_addr = sg_dma_address(sg);
			box->dst_row_addr = msmsdcc_fifo_addr(host);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = (MCI_FIFOSIZE << 16);

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_DST_CRCI(crci);
		}
		box++;
		sg++;
	}

	/* location of command block must be 64 bit aligned */
	BUG_ON(host->dma.cmd_busaddr & 0x07);

	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;

	return 0;
}
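/*
 * Program the data path for a transfer: set the data timeout and length,
 * then enable the DPSM. DMA is used when msmsdcc_config_dma() succeeds;
 * otherwise the transfer falls back to PIO and the FIFO interrupts are
 * unmasked instead.
 */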
static void
msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base = host->base;
	unsigned int pio_irqmask = 0;

	host->curr.data = data;
	host->curr.xfer_size = data->blksz * data->blocks;
	host->curr.xfer_remain = host->curr.xfer_size;
	host->curr.data_xfered = 0;
	host->curr.got_dataend = 0;
	host->curr.got_datablkend = 0;

	memset(&host->pio, 0, sizeof(host->pio));

	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
	do_div(clks, NSEC_PER_SEC);
	timeout = data->timeout_clks + (unsigned int)clks;
	writel(timeout, base + MMCIDATATIMER);

	writel(host->curr.xfer_size, base + MMCIDATALENGTH);

	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);

	if (!msmsdcc_config_dma(host, data))
		datactrl |= MCI_DPSM_DMAENABLE;
	else {
		host->pio.sg = data->sg;
		host->pio.sg_len = data->sg_len;
		host->pio.sg_off = 0;

		if (data->flags & MMC_DATA_READ) {
			pio_irqmask = MCI_RXFIFOHALFFULLMASK;
			if (host->curr.xfer_remain < MCI_FIFOSIZE)
				pio_irqmask |= MCI_RXDATAAVLBLMASK;
		} else
			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	writel(pio_irqmask, base + MMCIMASK1);
	writel(datactrl, base + MMCIDATACTRL);

	if (datactrl & MCI_DPSM_DMAENABLE) {
		host->dma.busy = 1;
		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
	}
}
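/*
 * Issue a command. The opcodes checked below (17, 18, 24, 25, 53) are the
 * single/multiple block read/write commands and SDIO CMD53, which need the
 * DATCMD hint in the command register.
 */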
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(2 + ((5 * 1000000) / host->clk_rate));
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}

	if (cmd->opcode == 17 || cmd->opcode == 18 ||
	    cmd->opcode == 24 || cmd->opcode == 25 ||
	    cmd->opcode == 53)
		c |= MCI_CSPM_DATCMD;

	if (cmd == cmd->mrq->stop)
		c |= MCI_CSPM_MCIABORT;

	host->curr.cmd = cmd;

	host->stats.cmds++;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
		 unsigned int status)
{
	if (status & MCI_DATACRCFAIL) {
		pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
		pr_err("%s: opcode 0x%.8x\n", __func__,
		       data->mrq->cmd->opcode);
		pr_err("%s: blksz %d, blocks %d\n", __func__,
		       data->blksz, data->blocks);
		data->error = -EILSEQ;
	} else if (status & MCI_DATATIMEOUT) {
		pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
		data->error = -ETIMEDOUT;
	} else if (status & MCI_RXOVERRUN) {
		pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else if (status & MCI_TXUNDERRUN) {
		pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else {
		pr_err("%s: Unknown error (0x%.8x)\n",
		       mmc_hostname(host->mmc), status);
		data->error = -EIO;
	}
}
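/*
 * PIO helpers: drain or fill the FIFO one word at a time. Reads stop when
 * the controller reports no more data available; writes stop when the
 * buffer is exhausted or the FIFO is no longer at least half empty.
 */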
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	uint32_t *ptr = (uint32_t *) buffer;
	int count = 0;

	while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {

		*ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
		ptr++;
		count += sizeof(uint32_t);

		remain -= sizeof(uint32_t);
		if (remain == 0)
			break;
	}
	return count;
}
static int
msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
		  unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
						    MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);
		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
	while (maxspin) {
		if ((readl(host->base + MMCISTATUS) & mask))
			return 0;
		udelay(1);
		--maxspin;
	}
	return -ETIMEDOUT;
}
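/*
 * PIO interrupt handler. Maps the current scatterlist page with
 * kmap_atomic() and moves data while the FIFO keeps raising TX/RX events
 * (optionally spinning briefly when polling is enabled). On exit, MASK1 is
 * left set so the tail of a short read still raises RXDATAAVLBL, and is
 * cleared once nothing remains to transfer.
 */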
static int
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	uint32_t status;

	status = readl(base + MMCISTATUS);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
			if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
				break;

			if (msmsdcc_spin_on_status(host,
						   (MCI_TXFIFOHALFEMPTY |
						    MCI_RXDATAAVLBL),
						   PIO_SPINMAX)) {
				break;
			}
		}

		/* Map the current scatter buffer */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(host->pio.sg),
				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
		buffer += host->pio.sg_off;
		remain = host->pio.sg->length - host->pio.sg_off;
		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain, status);

		/* Unmap the buffer */
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);

		host->pio.sg_off += len;
		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;

		if (remain == 0) {
			/* This sg page is full - do some housekeeping */
			if (status & MCI_RXACTIVE && host->curr.user_pages)
				flush_dcache_page(sg_page(host->pio.sg));

			if (!--host->pio.sg_len) {
				memset(&host->pio, 0, sizeof(host->pio));
				break;
			}

			/* Advance to next sg */
			host->pio.sg++;
			host->pio.sg_off = 0;
		}

		status = readl(base + MMCISTATUS);
	} while (1);

	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	if (!host->curr.xfer_remain)
		writel(0, base + MMCIMASK1);

	return IRQ_HANDLED;
}
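/*
 * Command-status handling: latch the response registers, cancel the
 * software command timer and flag timeouts/CRC errors. Non-data commands
 * and failed commands are completed here; a write command with data still
 * to send kicks off the data phase instead.
 */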
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
	struct mmc_command *cmd = host->curr.cmd;
	void __iomem *base = host->base;

	host->curr.cmd = NULL;
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	del_timer(&host->command_timer);
	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL &&
		   cmd->flags & MMC_RSP_CRC) {
		pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->curr.data && host->dma.sg)
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else if (host->curr.data) { /* Non DMA */
			msmsdcc_stop_data(host);
			msmsdcc_request_end(host, cmd->mrq);
		} else /* host->data == NULL */
			msmsdcc_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ))
		msmsdcc_start_data(host, cmd->data);
}
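/*
 * Data-path portion of the main interrupt handler: record data errors,
 * track DATAEND/DATABLKEND, and complete the data phase once both have
 * been seen and the DataMover is no longer busy.
 */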
static void
msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
			void __iomem *base)
{
	struct mmc_data *data = host->curr.data;

	if (!data)
		return;

	/* Check for data errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		msmsdcc_data_err(host, data, status);
		host->curr.data_xfered = 0;
		if (host->dma.sg)
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else {
			msmsdcc_stop_data(host);
			if (!data->stop)
				msmsdcc_request_end(host, data->mrq);
			else
				msmsdcc_start_command(host, data->stop, 0);
		}
	}

	/* Check for data done */
	if (!host->curr.got_dataend && (status & MCI_DATAEND))
		host->curr.got_dataend = 1;

	if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
		host->curr.got_datablkend = 1;

	/*
	 * If DMA is still in progress, we complete via the completion handler
	 */
	if (host->curr.got_dataend && host->curr.got_datablkend &&
	    !host->dma.busy) {
		/*
		 * There appears to be an issue in the controller where
		 * if you request a small block transfer (< fifo size),
		 * you may get your DATAEND/DATABLKEND irq without the
		 * PIO data irq.
		 *
		 * Check to see if there is still data to be read,
		 * and simulate a PIO irq.
		 */
		if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
			msmsdcc_pio_irq(1, host);

		msmsdcc_stop_data(host);
		if (!data->error)
			host->curr.data_xfered = host->curr.xfer_size;

		if (!data->stop)
			msmsdcc_request_end(host, data->mrq);
		else
			msmsdcc_start_command(host, data->stop, 0);
	}
}
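/*
 * Main controller interrupt handler. Loops while masked status bits are
 * pending, dispatching data and command events; any SDIO card interrupt
 * is signalled to the core only after the host lock is released.
 */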
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int cardint = 0;

	spin_lock(&host->lock);

	do {
		status = readl(base + MMCISTATUS);

		status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
		writel(status, base + MMCICLEAR);

		msmsdcc_handle_irq_data(host, status, base);

		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			      MCI_CMDTIMEOUT) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (status & MCI_SDIOINTOPER) {
			cardint = 1;
			status &= ~MCI_SDIOINTOPER;
		}
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	/*
	 * We have to delay handling the card interrupt as it calls
	 * back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return IRQ_RETVAL(ret);
}
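/*
 * ->request() entry point. Rejects requests once the card has been
 * ejected, starts the data phase first for reads, then issues the command.
 * Quick commands are completed by polling the status register (cmdpoll);
 * otherwise a one-second command timer backs up the interrupt path.
 */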
static void
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

	spin_lock_irqsave(&host->lock, flags);

	host->stats.reqs++;

	if (host->eject) {
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz *
						  mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		msmsdcc_start_data(host, mrq->data);

	msmsdcc_start_command(host, mrq->cmd, 0);

	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
				MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
				CMD_SPINMAX)) {
		uint32_t status = readl(host->base + MMCISTATUS);
		msmsdcc_do_cmdirq(host, status);
		writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
		       host->base + MMCICLEAR);
		host->stats.cmdpoll_hits++;
	} else {
		host->stats.cmdpoll_misses++;
		mod_timer(&host->command_timer, jiffies + HZ);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}
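/*
 * ->set_ios() entry point: gate the bus clocks on demand, program the
 * clock register (bus width, power-save and flow-control bits) and the
 * power register, and drop the clocks again when the bus clock is off.
 */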
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;
	int rc;

	if (ios->clock) {

		if (!host->clks_on) {
			clk_enable(host->pclk);
			clk_enable(host->clk);
			host->clks_on = 1;
		}
		if (ios->clock != host->clk_rate) {
			rc = clk_set_rate(host->clk, ios->clock);
			if (rc < 0)
				pr_err("%s: Error setting clock rate (%d)\n",
				       mmc_hostname(host->mmc), rc);
			else
				host->clk_rate = ios->clock;
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		clk |= (2 << 10); /* Set WIDEBUS */

	if (ios->clock > 400000 && msmsdcc_pwrsave)
		clk |= (1 << 9); /* PWRSAVE */

	clk |= (1 << 12); /* FLOW_ENA */
	clk |= (1 << 15); /* feedback clock */

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_OD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
		clk_disable(host->clk);
		clk_disable(host->pclk);
		host->clks_on = 0;
	}
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&host->lock, flags);
	if (msmsdcc_sdioirq == 1) {
		status = readl(host->base + MMCIMASK0);
		if (enable)
			status |= MCI_SDIOINTOPERMASK;
		else
			status &= ~MCI_SDIOINTOPERMASK;
		host->saved_irq0mask = status;
		writel(status, host->base + MMCIMASK0);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops msmsdcc_ops = {
	.request	= msmsdcc_request,
	.set_ios	= msmsdcc_set_ios,
	.enable_sdio_irq = msmsdcc_enable_sdio_irq,
};
static void
msmsdcc_check_status(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned int status;

	if (!host->plat->status) {
		mmc_detect_change(host->mmc, 0);
		goto out;
	}

	status = host->plat->status(mmc_dev(host->mmc));
	host->eject = !status;
	if (status ^ host->oldstat) {
		pr_info("%s: Slot status change detected (%d -> %d)\n",
			mmc_hostname(host->mmc), host->oldstat, status);
		if (status)
			mmc_detect_change(host->mmc, (5 * HZ) / 2);
		else
			mmc_detect_change(host->mmc, 0);
	}

	host->oldstat = status;

out:
	if (host->timer.function)
		mod_timer(&host->timer, jiffies + HZ);
}
static irqreturn_t
msmsdcc_platform_status_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: %d\n", __func__, irq);
	msmsdcc_check_status((unsigned long) host);
	return IRQ_HANDLED;
}
static void
msmsdcc_status_notify_cb(int card_present, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
	       card_present);
	msmsdcc_check_status((unsigned long) host);
}
/*
 * Called when a command expires: dump some debugging and then error
 * out the transaction.
 */
static void
msmsdcc_command_expired(unsigned long _data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;

	if (!mrq) {
		pr_info("%s: Command expiry misfire\n",
			mmc_hostname(host->mmc));
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	pr_err("%s: Command timeout (%p %p %p %p)\n",
	       mmc_hostname(host->mmc), mrq, mrq->cmd,
	       mrq->data, host->dma.sg);

	mrq->cmd->error = -ETIMEDOUT;
	msmsdcc_stop_data(host);

	writel(0, host->base + MMCICOMMAND);

	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}
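/*
 * Allocate the non-cached DataMover command buffer and record the DMA
 * channel from the platform resource; a missing resource simply leaves
 * the host in PIO-only mode (dma.channel stays -1).
 */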
static int
msmsdcc_init_dma(struct msmsdcc_host *host)
{
	memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
	host->dma.host = host;
	host->dma.channel = -1;

	if (!host->dmares)
		return -ENODEV;

	host->dma.nc = dma_alloc_coherent(NULL,
					  sizeof(struct msmsdcc_nc_dmadata),
					  &host->dma.nc_busaddr,
					  GFP_KERNEL);
	if (host->dma.nc == NULL) {
		pr_err("Unable to allocate DMA buffer\n");
		return -ENOMEM;
	}
	memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
	host->dma.cmd_busaddr = host->dma.nc_busaddr;
	host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
				   offsetof(struct msmsdcc_nc_dmadata, cmdptr);
	host->dma.channel = host->dmares->start;

	return 0;
}
#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
static void
do_resume_work(struct work_struct *work)
{
	struct msmsdcc_host *host =
		container_of(work, struct msmsdcc_host, resume_task);
	struct mmc_host *mmc = host->mmc;

	if (mmc) {
		mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
}
#endif
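/*
 * Probe: map the register window, set up DMA and clocks, describe the host
 * capabilities to the MMC core, wire up card detection (dedicated IRQ,
 * platform callback, or a 1 Hz polling timer) and the command/PIO IRQs.
 */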
static int
msmsdcc_probe(struct platform_device *pdev)
{
	struct mmc_platform_data *plat = pdev->dev.platform_data;
	struct msmsdcc_host *host;
	struct mmc_host *mmc;
	struct resource *cmd_irqres = NULL;
	struct resource *pio_irqres = NULL;
	struct resource *stat_irqres = NULL;
	struct resource *memres = NULL;
	struct resource *dmares = NULL;
	int ret;

	/* must have platform data */
	if (!plat) {
		pr_err("%s: Platform data not available\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (pdev->id < 1 || pdev->id > 4)
		return -EINVAL;

	if (pdev->resource == NULL || pdev->num_resources < 2) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "cmd_irq");
	pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "pio_irq");
	stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						   "status_irq");

	if (!cmd_irqres || !pio_irqres || !memres) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}
	/*
	 * Setup our host structure
	 */

	mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->pdev_id = pdev->id;
	host->plat = plat;
	host->mmc = mmc;

	host->cmdpoll = 1;

	host->base = ioremap(memres->start, PAGE_SIZE);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	host->cmd_irqres = cmd_irqres;
	host->pio_irqres = pio_irqres;
	host->memres = memres;
	host->dmares = dmares;
	spin_lock_init(&host->lock);

	/*
	 * Setup DMA
	 */
	msmsdcc_init_dma(host);

	/*
	 * Setup main peripheral bus clock
	 */
	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
	if (IS_ERR(host->pclk)) {
		ret = PTR_ERR(host->pclk);
		goto host_free;
	}

	ret = clk_enable(host->pclk);
	if (ret)
		goto pclk_put;

	host->pclk_rate = clk_get_rate(host->pclk);

	/*
	 * Setup SDC MMC clock
	 */
	host->clk = clk_get(&pdev->dev, "sdc_clk");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto pclk_disable;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_put;

	ret = clk_set_rate(host->clk, msmsdcc_fmin);
	if (ret) {
		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
		goto clk_disable;
	}

	host->clk_rate = clk_get_rate(host->clk);

	host->clks_on = 1;
	/*
	 * Setup MMC host structure
	 */
	mmc->ops = &msmsdcc_ops;
	mmc->f_min = msmsdcc_fmin;
	mmc->f_max = msmsdcc_fmax;
	mmc->ocr_avail = plat->ocr_mask;

	if (msmsdcc_4bit)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	if (msmsdcc_sdioirq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;

	mmc->max_phys_segs = NR_SG;
	mmc->max_hw_segs = NR_SG;
	mmc->max_blk_size = 4096;	/* MCI_DATA_CTL BLOCKSIZE up to 4096 */
	mmc->max_blk_count = 65536;

	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
	mmc->max_seg_size = mmc->max_req_size;

	writel(0, host->base + MMCIMASK0);
	writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	host->saved_irq0mask = MCI_IRQENABLE;
	/*
	 * Setup card detect change
	 */

	memset(&host->timer, 0, sizeof(host->timer));

	if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
		unsigned long irqflags = IRQF_SHARED |
			(stat_irqres->flags & IRQF_TRIGGER_MASK);

		host->stat_irq = stat_irqres->start;
		ret = request_irq(host->stat_irq,
				  msmsdcc_platform_status_irq,
				  irqflags,
				  DRIVER_NAME " (slot)",
				  host);
		if (ret) {
			pr_err("%s: Unable to get slot IRQ %d (%d)\n",
			       mmc_hostname(mmc), host->stat_irq, ret);
			goto clk_disable;
		}
	} else if (plat->register_status_notify) {
		plat->register_status_notify(msmsdcc_status_notify_cb, host);
	} else if (!plat->status)
		pr_err("%s: No card detect facilities available\n",
		       mmc_hostname(mmc));
	else {
		init_timer(&host->timer);
		host->timer.data = (unsigned long)host;
		host->timer.function = msmsdcc_check_status;
		host->timer.expires = jiffies + HZ;
		add_timer(&host->timer);
	}

	if (plat->status) {
		host->oldstat = host->plat->status(mmc_dev(host->mmc));
		host->eject = !host->oldstat;
	}

	/*
	 * Setup a command timer. We currently need this due to
	 * some 'strange' timeout / error handling situations.
	 */
	init_timer(&host->command_timer);
	host->command_timer.data = (unsigned long) host;
	host->command_timer.function = msmsdcc_command_expired;
	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
			  DRIVER_NAME " (cmd)", host);
	if (ret)
		goto stat_irq_free;

	ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
			  DRIVER_NAME " (pio)", host);
	if (ret)
		goto cmd_irq_free;

	mmc_set_drvdata(pdev, mmc);
	mmc_add_host(mmc);

	pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
		mmc_hostname(mmc), (unsigned long long)memres->start,
		(unsigned int) cmd_irqres->start,
		(unsigned int) host->stat_irq, host->dma.channel);
	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
		(mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
	pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
		mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
	pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
	pr_info("%s: Power save feature enable = %d\n",
		mmc_hostname(mmc), msmsdcc_pwrsave);

	if (host->dma.channel != -1) {
		pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
		pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.cmd_busaddr,
			host->dma.cmdptr_busaddr);
	} else
		pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
	if (host->timer.function)
		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));

	return 0;
 cmd_irq_free:
	free_irq(cmd_irqres->start, host);
 stat_irq_free:
	if (host->stat_irq)
		free_irq(host->stat_irq, host);
 clk_disable:
	clk_disable(host->clk);
 clk_put:
	clk_put(host->clk);
 pclk_disable:
	clk_disable(host->pclk);
 pclk_put:
	clk_put(host->pclk);
 host_free:
	mmc_free_host(mmc);
 out:
	return ret;
}
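/*
 * Platform PM hooks: SDIO cards are left alone across suspend/resume; for
 * other card types the MMC core is suspended/resumed, the interrupt mask
 * saved/restored, and the bus clocks gated.
 */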
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	int rc = 0;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		if (host->stat_irq)
			disable_irq(host->stat_irq);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			rc = mmc_suspend_host(mmc, state);
		if (!rc) {
			writel(0, host->base + MMCIMASK0);

			if (host->clks_on) {
				clk_disable(host->clk);
				clk_disable(host->pclk);
				host->clks_on = 0;
			}
		}
	}
	return rc;
}
static int
msmsdcc_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	unsigned long flags;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		spin_lock_irqsave(&host->lock, flags);

		if (!host->clks_on) {
			clk_enable(host->pclk);
			clk_enable(host->clk);
			host->clks_on = 1;
		}

		writel(host->saved_irq0mask, host->base + MMCIMASK0);

		spin_unlock_irqrestore(&host->lock, flags);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
	return 0;
}
static struct platform_driver msmsdcc_driver = {
	.probe		= msmsdcc_probe,
	.suspend	= msmsdcc_suspend,
	.resume		= msmsdcc_resume,
	.driver		= {
		.name	= "msm_sdcc",
	},
};
static int __init msmsdcc_init(void)
{
	return platform_driver_register(&msmsdcc_driver);
}

static void __exit msmsdcc_exit(void)
{
	platform_driver_unregister(&msmsdcc_driver);
}

module_init(msmsdcc_init);
module_exit(msmsdcc_exit);

MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");