AT91 MMC 5: Minor cleanups
[linux-2.6/verdex.git] / drivers/mmc/at91_mci.c

/*
 *  linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91RM9200 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward.  The command is submitted to the controller
     and the request function returns.  When the controller generates an
     interrupt to indicate the command is finished, the response to the command
     is read and mmc_request_done() is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve
     the PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in
     from the request.  Due to a bug in the controller, when a read is
     completed, all the words are byte swapped in the scatterlist buffers.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the
     scatterlist into a DMA memory buffer (this is in case the source buffer
     should be read-only).  The entire write is then done from this single DMA
     memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91_mci.h>
#include <asm/arch/at91_pdc.h>

#define DRIVER_NAME "at91_mci"

#undef	SUPPORT_4WIRE
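/* Define SUPPORT_4WIRE to advertise 4-bit (4-wire) bus support; see at91_mci_probe() */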

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))

/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		int index;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;
		amount /= 4;

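		/*
		 * Note: the swab32() below mirrors, on the transmit side, the
		 * word byte-swap described for reads in the header comment;
		 * presumably the controller applies the same swap on writes.
		 */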
		for (index = 0; index < amount; index++)
			*dmabuf++ = swab32(sgbuffer[index]);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

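	/*
	 * The PDC has two receive buffer slots: the current one (RPR/RCR)
	 * and the next one (RNPR/RNCR), so up to two scatterlist entries
	 * can be queued here; i == 0 is the current slot, i == 1 the next.
	 */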
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, AT91_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, AT91_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, AT91_PDC_RPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		int index;
		int len;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		len = sg->length / 4;

		for (index = 0; index < len; index++) {
			buffer[index] = swab32(buffer[index]);
		}
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}

/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);
	at91_mci_write(host, AT91_MCI_SDCR, 0);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 * return the interrupts to enable
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(host, AT91_PDC_RPR, 0);
		at91_mci_write(host, AT91_PDC_RCR, 0);
		at91_mci_write(host, AT91_PDC_RNPR, 0);
		at91_mci_write(host, AT91_PDC_RNCR, 0);
		at91_mci_write(host, AT91_PDC_TPR, 0);
		at91_mci_write(host, AT91_PDC_TCR, 0);
		at91_mci_write(host, AT91_PDC_TNPR, 0);
		at91_mci_write(host, AT91_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

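	/*
	 * The block length field occupies the upper 16 bits of the mode
	 * register (with PDCMODE just below it), hence the 0x7fff mask
	 * before or-ing in the new block length and PDCMODE.
	 */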
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, AT91_PDC_TPR, host->physical_address);
			at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */
	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}

/*
 * Send a command and enable the interrupts that signal its completion
 */
static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int ier;

	ier = at91_mci_send_command(host, cmd);

	pr_debug("setting ier to %08X\n", ier);

	/* Stop on errors or the required value */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91mci_process_next(struct at91mci_host *host)
{
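	/*
	 * Simple request state machine: first send the main command, then
	 * the stop command for multi-block transfers (if one was supplied),
	 * and finally report completion back to the MMC core.
	 */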
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
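		/*
		 * SEND_OP_COND / APP_OP_COND get an R3 response, which
		 * carries no valid CRC, so a response CRC error from the
		 * controller is expected there and not treated as a failure.
		 */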
		if ((status & AT91_MCI_RCRCE) &&
			((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

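		/*
		 * The MCI clock is MCK / (2 * (CLKDIV + 1)): use the exact
		 * divider when MCK divides evenly, otherwise round CLKDIV up
		 * so the card clock does not exceed the requested rate.
		 */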
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_output(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_output(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

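		/*
		 * RXBUFF: all queued PDC receive buffers have completed, so
		 * the read data is in; now wait for the final CMDRDY to end
		 * the request (see the read interrupt sequence in the header
		 * comment).
		 */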
		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, 0);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch.  Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	pr_debug("Probe MCI devices\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_BYTEBLOCK;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
#ifdef SUPPORT_4WIRE
		mmc->caps |= MMC_CAP_4_BIT_DATA;
#else
		printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
#endif
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENODEV;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
		clk_disable(host->mci_clk);
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		iounmap(host->baseaddr);
		release_mem_region(res->start, res->end - res->start + 1);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
				0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}

/*
 * Remove a device
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");