mmc: dw_mmc: gather each reset code into functions
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
                                 SDMMC_INT_HTO | SDMMC_INT_SBE | \
                                 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
                                 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
                                 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS      1
#define DW_MCI_RECV_STATUS      2
#define DW_MCI_DMA_THRESHOLD    16

#define DW_MCI_FREQ_MAX         200000000       /* unit: HZ */
#define DW_MCI_FREQ_MIN         400000          /* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
                                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
                                 SDMMC_IDMAC_INT_TI)

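/*
 * 32-bit IDMAC descriptor: des0 holds control/status flags, des1 the
 * buffer size, des2 the buffer address. In the chained mode this driver
 * uses (IDMAC_DES0_CH), des3 carries the address of the next descriptor
 * rather than a second data buffer.
 */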
struct idmac_desc {
        u32             des0;   /* Control Descriptor */
#define IDMAC_DES0_DIC  BIT(1)
#define IDMAC_DES0_LD   BIT(2)
#define IDMAC_DES0_FD   BIT(3)
#define IDMAC_DES0_CH   BIT(4)
#define IDMAC_DES0_ER   BIT(5)
#define IDMAC_DES0_CES  BIT(30)
#define IDMAC_DES0_OWN  BIT(31)

        u32             des1;   /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
        ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des2;   /* buffer 1 physical address */

        u32             des3;   /* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

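/*
 * Standard tuning block patterns from the SD 3.0 and eMMC 4.5
 * specifications: the card returns this known sequence in response to
 * CMD19 (4-bit) and CMD21 (HS200, 8-bit), and the tuning callback in
 * dw_mci_execute_tuning() below compares the received data against it.
 */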
static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
        0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
        0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
        0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
        0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
        0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
        0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
        0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
        0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
        0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
        0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
        0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
        0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
        0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
        0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
        0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
        0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
        0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
        0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
        0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
        0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
        0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

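/*
 * Consolidated reset helpers: dw_mci_fifo_reset() flushes stale data
 * out of the FIFO after an error, dw_mci_ctrl_all_reset() resets the
 * controller, FIFO and DMA interface together. Both are built on
 * dw_mci_ctrl_reset(); a sketch of their bodies follows that function
 * at the end of this listing.
 */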
static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
        struct dw_mci_slot *slot = s->private;
        struct mmc_request *mrq;
        struct mmc_command *cmd;
        struct mmc_command *stop;
        struct mmc_data *data;

        /* Make sure we get a consistent snapshot */
        spin_lock_bh(&slot->host->lock);
        mrq = slot->mrq;

        if (mrq) {
                cmd = mrq->cmd;
                data = mrq->data;
                stop = mrq->stop;

                if (cmd)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   cmd->opcode, cmd->arg, cmd->flags,
                                   cmd->resp[0], cmd->resp[1], cmd->resp[2],
                                   cmd->resp[3], cmd->error);
                if (data)
                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
                                   data->bytes_xfered, data->blocks,
                                   data->blksz, data->flags, data->error);
                if (stop)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   stop->opcode, stop->arg, stop->flags,
                                   stop->resp[0], stop->resp[1], stop->resp[2],
                                   stop->resp[3], stop->error);
        }

        spin_unlock_bh(&slot->host->lock);

        return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_req_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
        struct dw_mci *host = s->private;

        /* Dump the live register contents, not the register offsets */
        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
        seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

        return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
        struct mmc_host *mmc = slot->mmc;
        struct dw_mci *host = slot->host;
        struct dentry *root;
        struct dentry *node;

        root = mmc->debugfs_root;
        if (!root)
                return;

        node = debugfs_create_file("regs", S_IRUSR, root, host,
                                   &dw_mci_regs_fops);
        if (!node)
                goto err;

        node = debugfs_create_file("req", S_IRUSR, root, slot,
                                   &dw_mci_req_fops);
        if (!node)
                goto err;

        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
        if (!node)
                goto err;

        node = debugfs_create_x32("pending_events", S_IRUSR, root,
                                  (u32 *)&host->pending_events);
        if (!node)
                goto err;

        node = debugfs_create_x32("completed_events", S_IRUSR, root,
                                  (u32 *)&host->completed_events);
        if (!node)
                goto err;

        return;

err:
        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
        /* timeout (maximum) */
        mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 cmdr;

        cmd->error = -EINPROGRESS;

        cmdr = cmd->opcode;

        if (cmd->opcode == MMC_STOP_TRANSMISSION ||
            cmd->opcode == MMC_GO_IDLE_STATE ||
            cmd->opcode == MMC_GO_INACTIVE_STATE ||
            (cmd->opcode == SD_IO_RW_DIRECT &&
             ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
                cmdr |= SDMMC_CMD_STOP;
        else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
                cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

        if (cmd->flags & MMC_RSP_PRESENT) {
                /* We expect a response, so set this bit */
                cmdr |= SDMMC_CMD_RESP_EXP;
                if (cmd->flags & MMC_RSP_136)
                        cmdr |= SDMMC_CMD_RESP_LONG;
        }

        if (cmd->flags & MMC_RSP_CRC)
                cmdr |= SDMMC_CMD_RESP_CRC;

        data = cmd->data;
        if (data) {
                cmdr |= SDMMC_CMD_DAT_EXP;
                if (data->flags & MMC_DATA_STREAM)
                        cmdr |= SDMMC_CMD_STRM_MODE;
                if (data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }

        if (drv_data && drv_data->prepare_command)
                drv_data->prepare_command(slot->host, &cmdr);

        return cmdr;
}

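/*
 * Build the CMD register value for a stop/abort command ahead of time;
 * __dw_mci_start_request() caches the result in host->stop_cmdr so the
 * tasklet can fire a stop/abort immediately when a data error lands.
 */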
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
        struct mmc_command *stop;
        u32 cmdr;

        if (!cmd->data)
                return 0;

        stop = &host->stop_abort;
        cmdr = cmd->opcode;
        memset(stop, 0, sizeof(struct mmc_command));

        if (cmdr == MMC_READ_SINGLE_BLOCK ||
            cmdr == MMC_READ_MULTIPLE_BLOCK ||
            cmdr == MMC_WRITE_BLOCK ||
            cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
                stop->opcode = MMC_STOP_TRANSMISSION;
                stop->arg = 0;
                stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
        } else if (cmdr == SD_IO_RW_EXTENDED) {
                stop->opcode = SD_IO_RW_DIRECT;
                stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
                             ((cmd->arg >> 28) & 0x7);
                stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
        } else {
                return 0;
        }

        cmdr = stop->opcode | SDMMC_CMD_STOP |
               SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

        return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
                                 struct mmc_command *cmd, u32 cmd_flags)
{
        host->cmd = cmd;
        dev_vdbg(host->dev,
                 "start command: ARGR=0x%08x CMDR=0x%08x\n",
                 cmd->arg, cmd_flags);

        mci_writel(host, CMDARG, cmd->arg);
        wmb();

        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
        struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
        dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
        if (host->using_dma) {
                host->dma_ops->stop(host);
                host->dma_ops->cleanup(host);
        } else {
                /* Data transfer was stopped by the interrupt handler */
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        }
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
        if (data->flags & MMC_DATA_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        if (data)
                if (!data->host_cookie)
                        dma_unmap_sg(host->dev,
                                     data->sg,
                                     data->sg_len,
                                     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
        u32 temp;

        /* Disable and reset the IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp &= ~SDMMC_CTRL_USE_IDMAC;
        temp |= SDMMC_CTRL_DMA_RESET;
        mci_writel(host, CTRL, temp);

        /* Stop the IDMAC running */
        temp = mci_readl(host, BMOD);
        temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
        mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        dev_vdbg(host->dev, "DMA complete\n");

        host->dma_ops->cleanup(host);

        /*
         * If the card was removed, data will be NULL. No point in trying to
         * send the stop command or waiting for NBUSY in this case.
         */
        if (data) {
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
                tasklet_schedule(&host->tasklet);
        }
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
                                    unsigned int sg_len)
{
        int i;
        struct idmac_desc *desc = host->sg_cpu;

        for (i = 0; i < sg_len; i++, desc++) {
                unsigned int length = sg_dma_len(&data->sg[i]);
                u32 mem_addr = sg_dma_address(&data->sg[i]);

                /* Set the OWN bit and disable interrupts for this descriptor */
                desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

                /* Buffer length */
                IDMAC_SET_BUFFER1_SIZE(desc, length);

                /* Physical address to DMA to/from */
                desc->des2 = mem_addr;
        }

        /* Set first descriptor */
        desc = host->sg_cpu;
        desc->des0 |= IDMAC_DES0_FD;

        /* Set last descriptor */
        desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
        desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
        desc->des0 |= IDMAC_DES0_LD;
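
        /* Make sure the descriptor writes land before the IDMAC fetches them */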
        wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
        u32 temp;

        dw_mci_translate_sglist(host, host->data, sg_len);

        /* Select IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_USE_IDMAC;
        mci_writel(host, CTRL, temp);

        wmb();

        /* Enable the IDMAC */
        temp = mci_readl(host, BMOD);
        temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
        mci_writel(host, BMOD, temp);

        /* Start it running */
        mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
        struct idmac_desc *p;
        int i;

        /* Number of descriptors in the ring buffer */
        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

        /* Forward link the descriptor list */
        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
                p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des3 = host->sg_dma;
        p->des0 = IDMAC_DES0_ER;

        mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDSTS, IDMAC_INT_CLR);
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
                   SDMMC_IDMAC_INT_TI);

        /* Set the descriptor base address */
        mci_writel(host, DBADDR, host->sg_dma);
        return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
        .complete = dw_mci_idmac_complete_dma,
        .cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                                   struct mmc_data *data,
                                   bool next)
{
        struct scatterlist *sg;
        unsigned int i, sg_len;

        if (!next && data->host_cookie)
                return data->host_cookie;

        /*
         * We don't do DMA on "complex" transfers, i.e. with
         * non-word-aligned buffers or lengths. Also, we don't bother
         * with all the DMA setup overhead for short transfers.
         */
        if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
                return -EINVAL;

        if (data->blksz & 3)
                return -EINVAL;

        for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->offset & 3 || sg->length & 3)
                        return -EINVAL;
        }

        sg_len = dma_map_sg(host->dev,
                            data->sg,
                            data->sg_len,
                            dw_mci_get_dma_dir(data));
        if (sg_len == 0)
                return -EINVAL;

        if (next)
                data->host_cookie = sg_len;

        return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
                           struct mmc_request *mrq,
                           bool is_first_req)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie) {
                data->host_cookie = 0;
                return;
        }

        if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
                data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
                            struct mmc_request *mrq,
                            int err)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie)
                dma_unmap_sg(slot->host->dev,
                             data->sg,
                             data->sg_len,
                             dw_mci_get_dma_dir(data));
        data->host_cookie = 0;
}

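/*
 * FIFOTH tuning: the TX watermark sits at half the FIFO; MSIZE is
 * chosen as the largest burst size that evenly divides both the block
 * size in FIFO words and the space above the TX watermark, and the RX
 * watermark is then set to that burst size minus one.
 */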
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
        unsigned int blksz = data->blksz;
        const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
        u32 fifo_width = 1 << host->data_shift;
        u32 blksz_depth = blksz / fifo_width, fifoth_val;
        u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
        int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

        tx_wmark = (host->fifo_depth) / 2;
        tx_wmark_invers = host->fifo_depth - tx_wmark;

        /*
         * MSIZE is '1',
         * if blksz is not a multiple of the FIFO width
         */
        if (blksz % fifo_width) {
                msize = 0;
                rx_wmark = 1;
                goto done;
        }

        do {
                if (!((blksz_depth % mszs[idx]) ||
                      (tx_wmark_invers % mszs[idx]))) {
                        msize = idx;
                        rx_wmark = mszs[idx] - 1;
                        break;
                }
        } while (--idx > 0);

        /*
         * If idx is '0', it won't be tried.
         * Thus, the initial values are used.
         */
done:
        fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
        mci_writel(host, FIFOTH, fifoth_val);
#endif
}

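/*
 * Card read threshold (CDTHRCTL): in HS200/SDR104 the card clock must
 * not be stopped in the middle of a block, so the controller only
 * starts a block read once the FIFO is guaranteed to have room for the
 * whole threshold, avoiding mid-block underruns at high clock rates.
 */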
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
        unsigned int blksz = data->blksz;
        u32 blksz_depth, fifo_depth;
        u16 thld_size;

        WARN_ON(!(data->flags & MMC_DATA_READ));

        if (host->timing != MMC_TIMING_MMC_HS200 &&
            host->timing != MMC_TIMING_UHS_SDR104)
                goto disable;

        blksz_depth = blksz / (1 << host->data_shift);
        fifo_depth = host->fifo_depth;

        if (blksz_depth > fifo_depth)
                goto disable;

        /*
         * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
         * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
         * Currently just choose blksz.
         */
        thld_size = blksz;
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
        return;

disable:
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
        int sg_len;
        u32 temp;

        host->using_dma = 0;

        /* If we don't have a channel, we can't do DMA */
        if (!host->use_dma)
                return -ENODEV;

        sg_len = dw_mci_pre_dma_transfer(host, data, 0);
        if (sg_len < 0) {
                host->dma_ops->stop(host);
                return sg_len;
        }

        host->using_dma = 1;

        dev_vdbg(host->dev,
                 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
                 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
                 sg_len);

        /*
         * Decide the MSIZE and RX/TX Watermark.
         * If the current block size is the same as the previous size,
         * there is no need to update fifoth.
         */
        if (host->prev_blksz != data->blksz)
                dw_mci_adjust_fifoth(host, data);

        /* Enable the DMA interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_DMA_ENABLE;
        mci_writel(host, CTRL, temp);

        /* Disable RX/TX IRQs, let DMA handle it */
        temp = mci_readl(host, INTMASK);
        temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
        mci_writel(host, INTMASK, temp);

        host->dma_ops->start(host, sg_len);

        return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
        u32 temp;

        data->error = -EINPROGRESS;

        WARN_ON(host->data);
        host->sg = NULL;
        host->data = data;

        if (data->flags & MMC_DATA_READ) {
                host->dir_status = DW_MCI_RECV_STATUS;
                dw_mci_ctrl_rd_thld(host, data);
        } else {
                host->dir_status = DW_MCI_SEND_STATUS;
        }

        if (dw_mci_submit_data_dma(host, data)) {
                int flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;

                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->sg = data->sg;
                host->part_buf_start = 0;
                host->part_buf_count = 0;

                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
                temp = mci_readl(host, INTMASK);
                temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
                mci_writel(host, INTMASK, temp);

                temp = mci_readl(host, CTRL);
                temp &= ~SDMMC_CTRL_DMA_ENABLE;
                mci_writel(host, CTRL, temp);

                /*
                 * Use the initial fifoth_val for PIO mode.
                 * Since the next transfer may be done by DMA,
                 * prev_blksz should be invalidated.
                 */
                mci_writel(host, FIFOTH, host->fifoth_val);
                host->prev_blksz = 0;
        } else {
                /*
                 * Keep the current block size.
                 * It will be used to decide whether to update
                 * the fifoth register next time.
                 */
                host->prev_blksz = data->blksz;
        }
}

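/*
 * Issue a CIU command that carries no card command (e.g. a clock-update
 * with SDMMC_CMD_UPD_CLK) and poll for the controller to clear the
 * start bit, which signals that it has latched the new settings.
 */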
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
        struct dw_mci *host = slot->host;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
        unsigned int cmd_status = 0;

        mci_writel(host, CMDARG, arg);
        wmb();
        mci_writel(host, CMD, SDMMC_CMD_START | cmd);

        while (time_before(jiffies, timeout)) {
                cmd_status = mci_readl(host, CMD);
                if (!(cmd_status & SDMMC_CMD_START))
                        return;
        }
        dev_err(&slot->mmc->class_dev,
                "Timeout sending command (cmd %#x arg %#x status %#x)\n",
                cmd, arg, cmd_status);
}

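/*
 * CLKDIV holds a divider value n that yields bus_hz / (2 * n) on the
 * card clock, with n == 0 meaning bypass (bus_hz straight through);
 * every divider change must be announced to the CIU with a clock-update
 * command while the card clock is gated.
 */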
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
        struct dw_mci *host = slot->host;
        unsigned int clock = slot->clock;
        u32 div;
        u32 clk_en_a;

        if (!clock) {
                mci_writel(host, CLKENA, 0);
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
        } else if (clock != host->current_speed || force_clkinit) {
                div = host->bus_hz / clock;
                if (host->bus_hz % clock && host->bus_hz > clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
                        div += 1;

                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

                if ((clock << div) != slot->__clk_old || force_clkinit)
                        dev_info(&slot->mmc->class_dev,
                                 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
                                 slot->id, host->bus_hz, clock,
                                 div ? ((host->bus_hz / div) >> 1) :
                                 host->bus_hz, div);

                /* disable clock */
                mci_writel(host, CLKENA, 0);
                mci_writel(host, CLKSRC, 0);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                /* set clock to desired speed */
                mci_writel(host, CLKDIV, div);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                /* enable clock; only low power if no SDIO */
                clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
                if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
                        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
                mci_writel(host, CLKENA, clk_en_a);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                /* remember the clock with the divider folded in */
                slot->__clk_old = clock << div;
        }

        host->current_speed = clock;

        /* Set the current slot bus width */
        mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
                                   struct dw_mci_slot *slot,
                                   struct mmc_command *cmd)
{
        struct mmc_request *mrq;
        struct mmc_data *data;
        u32 cmdflags;

        mrq = slot->mrq;
        if (host->pdata->select_slot)
                host->pdata->select_slot(slot->id);

        host->cur_slot = slot;
        host->mrq = mrq;

        host->pending_events = 0;
        host->completed_events = 0;
        host->cmd_status = 0;
        host->data_status = 0;
        host->dir_status = 0;

        data = cmd->data;
        if (data) {
                dw_mci_set_timeout(host);
                mci_writel(host, BYTCNT, data->blksz * data->blocks);
                mci_writel(host, BLKSIZ, data->blksz);
        }

        cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

        /* this is the first command, send the initialization clock */
        if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
                cmdflags |= SDMMC_CMD_INIT;

        if (data) {
                dw_mci_submit_data(host, data);
                wmb();
        }

        dw_mci_start_command(host, cmd, cmdflags);

        if (mrq->stop)
                host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
        else
                host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
                                 struct dw_mci_slot *slot)
{
        struct mmc_request *mrq = slot->mrq;
        struct mmc_command *cmd;

        cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
        __dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
                                 struct mmc_request *mrq)
{
        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
                 host->state);

        slot->mrq = mrq;

        if (host->state == STATE_IDLE) {
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                list_add_tail(&slot->queue_node, &host->queue);
        }
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        WARN_ON(slot->mrq);

        /*
         * The check for card presence and queueing of the request must be
         * atomic, otherwise the card could be removed in between and the
         * request wouldn't fail until another card was inserted.
         */
        spin_lock_bh(&host->lock);

        if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
                spin_unlock_bh(&host->lock);
                mrq->cmd->error = -ENOMEDIUM;
                mmc_request_done(mmc, mrq);
                return;
        }

        dw_mci_queue_request(host, slot, mrq);

        spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 regs;

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                slot->ctype = SDMMC_CTYPE_4BIT;
                break;
        case MMC_BUS_WIDTH_8:
                slot->ctype = SDMMC_CTYPE_8BIT;
                break;
        default:
                /* set default 1 bit mode */
                slot->ctype = SDMMC_CTYPE_1BIT;
        }

        regs = mci_readl(slot->host, UHS_REG);

        /* DDR mode set */
        if (ios->timing == MMC_TIMING_UHS_DDR50)
                regs |= ((0x1 << slot->id) << 16);
        else
                regs &= ~((0x1 << slot->id) << 16);

        mci_writel(slot->host, UHS_REG, regs);
        slot->host->timing = ios->timing;

        /*
         * Use mirror of ios->clock to prevent race with mmc
         * core ios update when finding the minimum.
         */
        slot->clock = ios->clock;

        if (drv_data && drv_data->set_ios)
                drv_data->set_ios(slot->host, ios);

        /* Slot specific timing and width adjustment */
        dw_mci_setup_bus(slot, false);

        switch (ios->power_mode) {
        case MMC_POWER_UP:
                set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
                /* Power up slot */
                if (slot->host->pdata->setpower)
                        slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
                regs = mci_readl(slot->host, PWREN);
                regs |= (1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        case MMC_POWER_OFF:
                /* Power down slot */
                if (slot->host->pdata->setpower)
                        slot->host->pdata->setpower(slot->id, 0);
                regs = mci_readl(slot->host, PWREN);
                regs &= ~(1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        default:
                break;
        }
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
        int read_only;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci_board *brd = slot->host->pdata;

        /* Use platform get_ro function, else try on board write protect */
        if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
                read_only = 0;
        else if (brd->get_ro)
                read_only = brd->get_ro(slot->id);
        else if (gpio_is_valid(slot->wp_gpio))
                read_only = gpio_get_value(slot->wp_gpio);
        else
                read_only =
                        mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

        dev_dbg(&mmc->class_dev, "card is %s\n",
                read_only ? "read-only" : "read-write");

        return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
        int present;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci_board *brd = slot->host->pdata;

        /* Use platform get_cd function, else try onboard card detect */
        if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
                present = 1;
        else if (brd->get_cd)
                present = !brd->get_cd(slot->id);
        else
                present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
                        == 0 ? 1 : 0;

        if (present)
                dev_dbg(&mmc->class_dev, "card is present\n");
        else
                dev_dbg(&mmc->class_dev, "card is not present\n");

        return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
        struct dw_mci *host = slot->host;
        u32 clk_en_a;
        const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

        clk_en_a = mci_readl(host, CLKENA);

        if (clk_en_a & clken_low_pwr) {
                mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
                mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                             SDMMC_CMD_PRV_DAT_WAIT, 0);
        }
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        u32 int_mask;

        /* Enable/disable Slot Specific SDIO interrupt */
        int_mask = mci_readl(host, INTMASK);
        if (enb) {
                /*
                 * Turn off low power mode if it was enabled.  This is a bit of
                 * a heavy operation and we disable / enable IRQs a lot, so
                 * we'll leave low power mode disabled and it will get
                 * re-enabled again in dw_mci_setup_bus().
                 */
                dw_mci_disable_low_power(slot);

                mci_writel(host, INTMASK,
                           (int_mask | SDMMC_INT_SDIO(slot->id)));
        } else {
                mci_writel(host, INTMASK,
                           (int_mask & ~SDMMC_INT_SDIO(slot->id)));
        }
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = host->drv_data;
        struct dw_mci_tuning_data tuning_data;
        int err = -ENOSYS;

        if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
                if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
                        tuning_data.blk_pattern = tuning_blk_pattern_8bit;
                        tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
                } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
                        tuning_data.blk_pattern = tuning_blk_pattern_4bit;
                        tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
                } else {
                        return -EINVAL;
                }
        } else if (opcode == MMC_SEND_TUNING_BLOCK) {
                tuning_data.blk_pattern = tuning_blk_pattern_4bit;
                tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
        } else {
                dev_err(host->dev,
                        "Undefined command(%d) for tuning\n", opcode);
                return -EINVAL;
        }

        if (drv_data && drv_data->execute_tuning)
                err = drv_data->execute_tuning(slot, opcode, &tuning_data);
        return err;
}

static const struct mmc_host_ops dw_mci_ops = {
        .request                = dw_mci_request,
        .pre_req                = dw_mci_pre_req,
        .post_req               = dw_mci_post_req,
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
{
        struct dw_mci_slot *slot;
        struct mmc_host *prev_mmc = host->cur_slot->mmc;

        WARN_ON(host->cmd || host->data);

        host->cur_slot->mrq = NULL;
        host->mrq = NULL;
        if (!list_empty(&host->queue)) {
                slot = list_entry(host->queue.next,
                                  struct dw_mci_slot, queue_node);
                list_del(&slot->queue_node);
                dev_vdbg(host->dev, "list not empty: %s is next\n",
                         mmc_hostname(slot->mmc));
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                dev_vdbg(host->dev, "list empty\n");
                host->state = STATE_IDLE;
        }

        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
        u32 status = host->cmd_status;

        host->cmd_status = 0;

        /* Read the response from the card (up to 16 bytes) */
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[3] = mci_readl(host, RESP0);
                        cmd->resp[2] = mci_readl(host, RESP1);
                        cmd->resp[1] = mci_readl(host, RESP2);
                        cmd->resp[0] = mci_readl(host, RESP3);
                } else {
                        cmd->resp[0] = mci_readl(host, RESP0);
                        cmd->resp[1] = 0;
                        cmd->resp[2] = 0;
                        cmd->resp[3] = 0;
                }
        }

        if (status & SDMMC_INT_RTO)
                cmd->error = -ETIMEDOUT;
        else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
                cmd->error = -EILSEQ;
        else if (status & SDMMC_INT_RESP_ERR)
                cmd->error = -EIO;
        else
                cmd->error = 0;

        if (cmd->error) {
                /* newer ip versions need a delay between retries */
                if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
                        mdelay(20);
        }

        return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
        u32 status = host->data_status;

        if (status & DW_MCI_DATA_ERROR_FLAGS) {
                if (status & SDMMC_INT_DRTO) {
                        data->error = -ETIMEDOUT;
                } else if (status & SDMMC_INT_DCRC) {
                        data->error = -EILSEQ;
                } else if (status & SDMMC_INT_EBE) {
                        if (host->dir_status == DW_MCI_SEND_STATUS) {
                                /*
                                 * No data CRC status was returned.
                                 * The number of bytes transferred
                                 * will be exaggerated in PIO mode.
                                 */
                                data->bytes_xfered = 0;
                                data->error = -ETIMEDOUT;
                        } else if (host->dir_status == DW_MCI_RECV_STATUS) {
                                data->error = -EIO;
                        }
                } else {
                        /* SDMMC_INT_SBE is included */
                        data->error = -EIO;
                }

                dev_err(host->dev, "data error, status 0x%08x\n", status);

                /*
                 * After an error, there may be data lingering
                 * in the FIFO
                 */
                dw_mci_fifo_reset(host);
        } else {
                data->bytes_xfered = data->blocks * data->blksz;
                data->error = 0;
        }

        return data->error;
}

static void dw_mci_tasklet_func(unsigned long priv)
{
        struct dw_mci *host = (struct dw_mci *)priv;
        struct mmc_data *data;
        struct mmc_command *cmd;
        struct mmc_request *mrq;
        enum dw_mci_state state;
        enum dw_mci_state prev_state;
        unsigned int err;

        spin_lock(&host->lock);

        state = host->state;
        data = host->data;
        mrq = host->mrq;

        do {
                prev_state = state;

                switch (state) {
                case STATE_IDLE:
                        break;

                case STATE_SENDING_CMD:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        cmd = host->cmd;
                        host->cmd = NULL;
                        set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
                        err = dw_mci_command_complete(host, cmd);
                        if (cmd == mrq->sbc && !err) {
                                prev_state = state = STATE_SENDING_CMD;
                                __dw_mci_start_request(host, host->cur_slot,
                                                       mrq->cmd);
                                goto unlock;
                        }

                        if (cmd->data && err) {
                                dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
                                state = STATE_SENDING_STOP;
                                break;
                        }

                        if (!cmd->data || err) {
                                dw_mci_request_end(host, mrq);
                                goto unlock;
                        }

                        prev_state = state = STATE_SENDING_DATA;
                        /* fall through */

                case STATE_SENDING_DATA:
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
                                dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
                                state = STATE_DATA_ERROR;
                                break;
                        }

                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
                        prev_state = state = STATE_DATA_BUSY;
                        /* fall through */

                case STATE_DATA_BUSY:
                        if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
                                                &host->pending_events))
                                break;

                        host->data = NULL;
                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
                        err = dw_mci_data_complete(host, data);

                        if (!err) {
                                if (!data->stop || mrq->sbc) {
                                        if (mrq->sbc)
                                                data->stop->error = 0;
                                        dw_mci_request_end(host, mrq);
                                        goto unlock;
                                }

                                /* stop command for open-ended transfer */
                                if (data->stop)
                                        send_stop_abort(host, data);
                        }

                        /*
                         * If err is non-zero, the stop/abort command has
                         * already been issued.
                         */
                        prev_state = state = STATE_SENDING_STOP;

                        /* fall through */

                case STATE_SENDING_STOP:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        /* CMD error in data command */
                        if (mrq->cmd->error && mrq->data)
                                dw_mci_fifo_reset(host);

                        host->cmd = NULL;
                        host->data = NULL;

                        if (mrq->stop)
                                dw_mci_command_complete(host, mrq->stop);
                        else
                                host->cmd_status = 0;

                        dw_mci_request_end(host, mrq);
                        goto unlock;

                case STATE_DATA_ERROR:
                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        state = STATE_DATA_BUSY;
                        break;
                }
        } while (state != prev_state);

        host->state = state;
unlock:
        spin_unlock(&host->lock);
}

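/*
 * part_buf is a small staging area for PIO: it holds the tail bytes of
 * a buffer that do not fill a whole FIFO word (2, 4 or 8 bytes wide,
 * per host->data_shift), so that FIFO accesses always stay full-width.
 */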
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
        memcpy((void *)&host->part_buf, buf, cnt);
        host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
        cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
        memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
        host->part_buf_count += cnt;
        return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
        cnt = min(cnt, (int)host->part_buf_count);
        if (cnt) {
                memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
                       cnt);
                host->part_buf_count -= cnt;
                host->part_buf_start += cnt;
        }
        return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
        memcpy(buf, &host->part_buf, cnt);
        host->part_buf_start = cnt;
        host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                if (host->part_buf_count == 2) {
                        mci_writew(host, DATA(host->data_offset),
                                   host->part_buf16);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writew(host, DATA(host->data_offset),
                                           aligned_buf[i]);
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        mci_writew(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writew(host, DATA(host->data_offset),
                                   host->part_buf16);
        }
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        /* pull data from fifo into aligned buffer */
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readw(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        *pdata++ = mci_readw(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                host->part_buf16 = mci_readw(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                if (host->part_buf_count == 4) {
                        mci_writel(host, DATA(host->data_offset),
                                   host->part_buf32);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writel(host, DATA(host->data_offset),
                                           aligned_buf[i]);
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        mci_writel(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writel(host, DATA(host->data_offset),
                                   host->part_buf32);
        }
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        /* pull data from fifo into aligned buffer */
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readl(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        *pdata++ = mci_readl(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                host->part_buf32 = mci_readl(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;

                if (host->part_buf_count == 8) {
                        mci_writeq(host, DATA(host->data_offset),
                                   host->part_buf);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x7)) {
                while (cnt >= 8) {
                        u64 aligned_buf[16];
                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
                        int items = len >> 3;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writeq(host, DATA(host->data_offset),
                                           aligned_buf[i]);
                }
        } else
#endif
        {
                u64 *pdata = buf;
                for (; cnt >= 8; cnt -= 8)
                        mci_writeq(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writeq(host, DATA(host->data_offset),
                                   host->part_buf);
        }
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x7)) {
                while (cnt >= 8) {
                        /* pull data from fifo into aligned buffer */
                        u64 aligned_buf[16];
                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
                        int items = len >> 3;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readq(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u64 *pdata = buf;
                for (; cnt >= 8; cnt -= 8)
                        *pdata++ = mci_readq(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                host->part_buf = mci_readq(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
        int len;

        /* get remaining partial bytes */
        len = dw_mci_pull_part_bytes(host, buf, cnt);
        if (unlikely(len == cnt))
                return;
        buf += len;
        cnt -= len;

        /* get the rest of the data */
        host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        void *buf;
        unsigned int offset;
        struct mmc_data *data = host->data;
        int shift = host->data_shift;
        u32 status;
        unsigned int len;
        unsigned int remain, fcnt;

        do {
                if (!sg_miter_next(sg_miter))
                        goto done;

                host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;

                do {
                        fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
                                        << shift) + host->part_buf_count;
                        len = min(remain, fcnt);
                        if (!len)
                                break;
                        dw_mci_pull_data(host, (void *)(buf + offset), len);
                        data->bytes_xfered += len;
                        offset += len;
                        remain -= len;
                } while (remain);

                sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
        /* if the RXDR is ready read again */
        } while ((status & SDMMC_INT_RXDR) ||
                 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

        if (!remain) {
                if (!sg_miter_next(sg_miter))
                        goto done;
                sg_miter->consumed = 0;
        }
        sg_miter_stop(sg_miter);
        return;

done:
        sg_miter_stop(sg_miter);
        host->sg = NULL;
        smp_wmb();
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        void *buf;
        unsigned int offset;
        struct mmc_data *data = host->data;
        int shift = host->data_shift;
        u32 status;
        unsigned int len;
        unsigned int fifo_depth = host->fifo_depth;
        unsigned int remain, fcnt;

        do {
                if (!sg_miter_next(sg_miter))
                        goto done;

                host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;

                do {
                        fcnt = ((fifo_depth -
                                 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
                                        << shift) - host->part_buf_count;
                        len = min(remain, fcnt);
                        if (!len)
                                break;
                        host->push_data(host, (void *)(buf + offset), len);
                        data->bytes_xfered += len;
                        offset += len;
                        remain -= len;
                } while (remain);

                sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
        } while (status & SDMMC_INT_TXDR); /* if TXDR write again */

        if (!remain) {
                if (!sg_miter_next(sg_miter))
                        goto done;
                sg_miter->consumed = 0;
        }
        sg_miter_stop(sg_miter);
        return;

done:
        sg_miter_stop(sg_miter);
        host->sg = NULL;
        smp_wmb();
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
        if (!host->cmd_status)
                host->cmd_status = status;

        smp_wmb();

        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
        struct dw_mci *host = dev_id;
        u32 pending;
        int i;

        pending = mci_readl(host, MINTSTS); /* read-only mask reg */

        /*
         * DTO fix - version 2.10a and below, and only if internal DMA
         * is configured.
         */
        if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
                if (!pending &&
                    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
                        pending |= SDMMC_INT_DATA_OVER;
        }

        if (pending) {
                if (pending & DW_MCI_CMD_ERROR_FLAGS) {
                        mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
                        host->cmd_status = pending;
                        smp_wmb();
                        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
                }

                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb();
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
                }

                if (pending & SDMMC_INT_DATA_OVER) {
                        mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
                        if (!host->data_status)
                                host->data_status = pending;
                        smp_wmb();
                        if (host->dir_status == DW_MCI_RECV_STATUS) {
                                if (host->sg != NULL)
                                        dw_mci_read_data_pio(host, true);
                        }
                        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
                }

                if (pending & SDMMC_INT_RXDR) {
                        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
                        if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
                                dw_mci_read_data_pio(host, false);
                }

                if (pending & SDMMC_INT_TXDR) {
                        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
                        if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
                                dw_mci_write_data_pio(host);
                }

                if (pending & SDMMC_INT_CMD_DONE) {
                        mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
                        dw_mci_cmd_interrupt(host, pending);
                }

                if (pending & SDMMC_INT_CD) {
                        mci_writel(host, RINTSTS, SDMMC_INT_CD);
                        queue_work(host->card_workqueue, &host->card_work);
                }

                /* Handle SDIO Interrupts */
                for (i = 0; i < host->num_slots; i++) {
                        struct dw_mci_slot *slot = host->slot[i];
                        if (pending & SDMMC_INT_SDIO(i)) {
                                mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
                                mmc_signal_sdio_irq(slot->mmc);
                        }
                }
        }

#ifdef CONFIG_MMC_DW_IDMAC
        /* Handle DMA interrupts */
        pending = mci_readl(host, IDSTS);
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
                host->dma_ops->complete(host);
        }
#endif

        return IRQ_HANDLED;
}

static void dw_mci_work_routine_card(struct work_struct *work)
{
        struct dw_mci *host = container_of(work, struct dw_mci, card_work);
        int i;

        for (i = 0; i < host->num_slots; i++) {
                struct dw_mci_slot *slot = host->slot[i];
                struct mmc_host *mmc = slot->mmc;
                struct mmc_request *mrq;
                int present;
                u32 ctrl;

                present = dw_mci_get_cd(mmc);
                while (present != slot->last_detect_state) {
                        dev_dbg(&slot->mmc->class_dev, "card %s\n",
                                present ? "inserted" : "removed");

                        spin_lock_bh(&host->lock);

                        /* Card change detected */
                        slot->last_detect_state = present;

                        /* Mark card as present if applicable */
                        if (present != 0)
                                set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

                        /* Clean up queue if present */
                        mrq = slot->mrq;
                        if (mrq) {
                                if (mrq == host->mrq) {
                                        host->data = NULL;
                                        host->cmd = NULL;

                                        switch (host->state) {
                                        case STATE_IDLE:
                                                break;
                                        case STATE_SENDING_CMD:
                                                mrq->cmd->error = -ENOMEDIUM;
                                                if (!mrq->data)
                                                        break;
                                                /* fall through */
                                        case STATE_SENDING_DATA:
                                                mrq->data->error = -ENOMEDIUM;
                                                dw_mci_stop_dma(host);
                                                break;
                                        case STATE_DATA_BUSY:
                                        case STATE_DATA_ERROR:
                                                if (mrq->data->error ==
                                                    -EINPROGRESS)
                                                        mrq->data->error =
                                                                -ENOMEDIUM;
                                                /* fall through */
                                        case STATE_SENDING_STOP:
                                                if (mrq->stop)
                                                        mrq->stop->error =
                                                                -ENOMEDIUM;
                                                break;
                                        }

                                        dw_mci_request_end(host, mrq);
                                } else {
                                        list_del(&slot->queue_node);
                                        mrq->cmd->error = -ENOMEDIUM;
                                        if (mrq->data)
                                                mrq->data->error = -ENOMEDIUM;
                                        if (mrq->stop)
                                                mrq->stop->error = -ENOMEDIUM;

                                        spin_unlock(&host->lock);
                                        mmc_request_done(slot->mmc, mrq);
                                        spin_lock(&host->lock);
                                }
                        }

                        /* Power down slot */
                        if (present == 0) {
                                clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

                                /* Clear down the FIFO */
                                dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
                                ctrl = mci_readl(host, BMOD);
                                /* Software reset of DMA */
                                ctrl |= SDMMC_IDMAC_SWRESET;
                                mci_writel(host, BMOD, ctrl);
#endif
                        }

                        spin_unlock_bh(&host->lock);

                        present = dw_mci_get_cd(mmc);
                }

                mmc_detect_change(slot->mmc,
                        msecs_to_jiffies(host->pdata->detect_delay_ms));
        }
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
        struct device_node *np;
        const __be32 *addr;
        int len;

        if (!dev || !dev->of_node)
                return NULL;

        for_each_child_of_node(dev->of_node, np) {
                addr = of_get_property(np, "reg", &len);
                if (!addr || (len < sizeof(int)))
                        continue;
                if (be32_to_cpup(addr) == slot)
                        return np;
        }
        return NULL;
}

static struct dw_mci_of_slot_quirks {
        char *quirk;
        int id;
} of_slot_quirks[] = {
        {
                .quirk  = "disable-wp",
                .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
        },
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
        struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
        int quirks = 0;
        int idx;

        /* get quirks */
        for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
                if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
                        quirks |= of_slot_quirks[idx].id;

        return quirks;
}

/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
        struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
        u32 bus_wd = 1;

        if (!np)
                return 1;

        if (of_property_read_u32(np, "bus-width", &bus_wd))
                dev_err(dev, "bus-width property not found, assuming width as 1\n");

        return bus_wd;
}

/* find the write protect gpio for a given slot; or -EINVAL if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
        struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
        int gpio;

        if (!np)
                return -EINVAL;

        gpio = of_get_named_gpio(np, "wp-gpios", 0);

        /* Having a missing entry is valid; return silently */
        if (!gpio_is_valid(gpio))
                return -EINVAL;

        if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
                dev_warn(dev, "gpio [%d] request failed\n", gpio);
                return -EINVAL;
        }

        return gpio;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
        return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
        return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
        return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
        return -EINVAL;
}
#endif /* CONFIG_OF */


static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit slot is also 4-bit capable */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}
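
/*
 * Illustrative only: a hypothetical platform-data override for the
 * blk_settings branch in dw_mci_init_slot() above. The values are made
 * up for the example; struct block_settings and struct dw_mci_board are
 * declared in <linux/mmc/dw_mmc.h>:
 *
 *	static struct block_settings dw_blk_settings = {
 *		.max_segs	= 64,
 *		.max_blk_size	= 512,
 *		.max_blk_count	= 256,
 *		.max_req_size	= 512 * 256,
 *		.max_seg_size	= 512 * 256,
 *	};
 *
 *	static struct dw_mci_board dw_pdata = {
 *		.blk_settings	= &dw_blk_settings,
 *	};
 */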

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
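
/*
 * Sketch (illustrative, hypothetical names): the dw_mci_dma_ops contract
 * that dw_mci_init_dma() checks before enabling DMA. init/start/stop/
 * cleanup are mandatory here; exit is invoked from the probe error path
 * and from dw_mci_remove() when present:
 *
 *	static const struct dw_mci_dma_ops my_dma_ops = {
 *		.init		= my_dma_init,		// one-time setup, 0 on success
 *		.start		= my_dma_start,		// kick off a prepared transfer
 *		.stop		= my_dma_stop,		// halt the engine
 *		.cleanup	= my_dma_cleanup,	// unmap/free per-request state
 *		.exit		= my_dma_exit,		// optional teardown
 *	};
 */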

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait until the hardware clears the requested reset bits */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}

static inline bool dw_mci_fifo_reset(struct dw_mci *host)
{
	/*
	 * Resetting the FIFO can generate a block interrupt, so drop the
	 * in-flight scatter-gather state before touching the controller.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
}

static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
{
	return dw_mci_ctrl_reset(host,
				 SDMMC_CTRL_FIFO_RESET |
				 SDMMC_CTRL_RESET |
				 SDMMC_CTRL_DMA_RESET);
}
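
/*
 * Usage sketch (illustrative, not taken verbatim from this driver): the
 * CTRL reset bits are self-clearing, so callers request one or more of
 * them and check the boolean result, e.g.
 *
 *	if (!dw_mci_fifo_reset(host))
 *		dev_err(host->dev, "FIFO reset failed\n");
 *
 * dw_mci_ctrl_all_reset() does the same for the controller, FIFO and
 * DMA resets in a single register write.
 */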

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};

static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;

	return pdata;
}
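
/*
 * Illustrative only: a hypothetical controller node exercising the
 * properties parsed above (the unit address and values are made up
 * for the example):
 *
 *	mshc0: mmc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *		supports-highspeed;
 *		keep-power-in-suspend;
 *	};
 */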

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(host->vmmc)) {
		ret = PTR_ERR(host->vmmc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_ciu;

		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
		host->vmmc = NULL;
	} else {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(host->dev,
					"regulator_enable fail: %d\n", ret);
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_regulator;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

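	/*
	 * Worked example (field name assumed from the databook): HCON[9:7]
	 * holds the synthesis-time DATA_WIDTH; as decoded above, 0 means
	 * 16-bit, 1 means 32-bit, 2 means 64-bit and anything else is
	 * reserved. data_shift is log2(bytes per FIFO word), so a 32-bit
	 * host converts a byte count to FIFO words with "nbytes >> 2".
	 */
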
	/* Reset all blocks */
	if (!dw_mci_ctrl_all_reset(host))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings:  RX_WMark = fifo_size / 2 - 1,
	 *                           TX_WMark = fifo_size / 2,
	 *                           DMA multiple-transaction size = 8.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware,
		 * you should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

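	/*
	 * Worked example (illustrative): for a 32-word FIFO this programs
	 * MSize = 0x2 (a burst of 8), RX_WMark = 15 and TX_WMark = 16;
	 * with RX_WMark in bits 27:16 (as read back above) and TX_WMark in
	 * bits 11:0, SDMMC_SET_FIFOTH(0x2, 15, 16) evaluates to 0x200F0010.
	 */
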
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data offset moved in the 2.40a spec, so check the version ID
	 * and set the offset of the DATA register accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

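	/*
	 * Example (register value assumed for illustration): a VERID read
	 * of 0x5342240a yields host->verid = 0x240a, which is not below
	 * DW_MMC_240A, so the DATA register is accessed at the 2.40a offset.
	 */
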
	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_workqueue;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_regulator:
	if (host->vmmc)
		regulator_disable(host->vmmc);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	int i, ret = 0;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			/* unwind: resume the slots suspended so far */
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(host->slot[i]->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register and invalidate
	 * prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}

		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");