/*
 *  linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
   This is the AT91RM9200 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to
     indicate the command is finished, the response to the command is read and
     mmc_request_done is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the
     PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from
     the request.  Due to a bug in the controller, when a read is completed, all
     the words are byte swapped in the scatterlist buffers.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are first copied from
     the scatterlist into a DMA memory buffer (this is in case the source buffer
     should be read only).  The entire write is then performed from this single DMA
     memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
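
/*
 * Example (illustrative sketch only, not part of this driver): the board
 * support code hands the slot wiring to this driver through platform_data
 * as a struct at91_mmc_data.  The fields used below are det_pin (card
 * detect GPIO), wp_pin (write protect GPIO), vcc_pin (card power GPIO) and
 * wire4 (4-bit data support).  A board file might describe its slot roughly
 * like this; the pin numbers are made up and the registration helper is
 * board-support specific:
 *
 *	static struct at91_mmc_data __initdata ek_mmc_data = {
 *		.det_pin	= AT91_PIN_PB27,	// hypothetical pins
 *		.wp_pin		= AT91_PIN_PA17,
 *		.wire4		= 1,
 *	};
 */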

#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91rm9200_mci.h>
#include <asm/arch/at91rm9200_pdc.h>

#define DRIVER_NAME "at91_mci"

#undef	SUPPORT_4WIRE

static struct clk *mci_clk;
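
/*
 * FL_SENT_COMMAND and FL_SENT_STOP drive the small state machine in
 * at91mci_process_next(): the request's command is sent first, then the
 * optional stop command, and finally mmc_request_done() is called.
 */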
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

/*
 * Read from a MCI register.
 */
static inline unsigned long at91_mci_read(unsigned int reg)
{
	void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;

	return __raw_readl(mci_base + reg);
}

/*
 * Write to a MCI register.
 */
static inline void at91_mci_write(unsigned int reg, unsigned long value)
{
	void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;

	__raw_writel(value, mci_base + reg);
}

/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	struct at91_mmc_data *board;
	int present;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		int index;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;
		amount /= 4;
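
		/*
		 * Pre-swap each 32-bit word before it is handed to the PDC;
		 * this presumably compensates for the same controller
		 * byte-swap quirk that the header comment describes for the
		 * read path, so the data goes out on the bus in the right
		 * order.
		 */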
		for (index = 0; index < amount; index++)
			*dmabuf++ = swab32(sgbuffer[index]);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}
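
	/*
	 * The PDC has a "current" stage (RPR/RCR) and a "next" stage
	 * (RNPR/RNCR), so the loop below runs twice: iteration 0 primes the
	 * current pointer/counter, iteration 1 the next ones, each from the
	 * next scatterlist entry that still has to be transferred.
	 */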
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(AT91_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(AT91_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(AT91_PDC_RPR, sg->dma_address);
			at91_mci_write(AT91_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(AT91_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
/*
 * Handle after a dma read
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		int index;
		int len;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		len = sg->length / 4;

		for (index = 0; index < len; index++) {
			buffer[index] = swab32(buffer[index]);
		}
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}
/*
 * Enable the controller
 */
static void at91_mci_enable(void)
{
	at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF);
	at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
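	/*
	 * The magic 0x834A appears to set PDCMODE (bit 15), a power-saving
	 * divider of 3 and an initial CLKDIV of 0x4A; treat that decoding as
	 * informational only.  CLKDIV is reprogrammed in at91_mci_set_ios()
	 * and the block length is ORed into MR in at91_mci_send_command().
	 */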
	at91_mci_write(AT91_MCI_MR, 0x834A);
	at91_mci_write(AT91_MCI_SDCR, 0x0);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(void)
{
	at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
/*
 * Send a command
 * return the interrupts to enable
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(AT91_MCI_ARGR, 0);
		at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}
	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR));
	if (!data) {
		at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(AT91_PDC_RPR, 0);
		at91_mci_write(AT91_PDC_RCR, 0);
		at91_mci_write(AT91_PDC_RNPR, 0);
		at91_mci_write(AT91_PDC_RNCR, 0);
		at91_mci_write(AT91_PDC_TPR, 0);
		at91_mci_write(AT91_PDC_TCR, 0);
		at91_mci_write(AT91_PDC_TNPR, 0);
		at91_mci_write(AT91_PDC_TNCR, 0);

		at91_mci_write(AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(AT91_PDC_TPR, host->physical_address);
			at91_mci_write(AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}
/*
 * Wait for a command to complete
 */
static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int ier;

	ier = at91_mci_send_command(host, cmd);

	pr_debug("setting ier to %08X\n", ier);

	/* Stop on errors or the required value */
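	/*
	 * at91_mci_irq() treats any masked status bit in 0xffff0000 as an
	 * error and completes the command, so enabling 0xffff0000 alongside
	 * `ier` stops the request on errors as well as on the expected event.
	 */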
	at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier);
}

/*
 * Process the next step in the request
 */
static void at91mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}
/*
 * Handle a command that has been completed
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) &&
		    ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}
/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91mci_process_next(host);
}
/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);
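
		/*
		 * MCI clock = master clock / (2 * (CLKDIV + 1)).  As an
		 * illustration (numbers are examples only): with a 48 MHz
		 * master clock and ios->clock = 12 MHz the first branch gives
		 * CLKDIV = (48/12)/2 - 1 = 1, i.e. exactly 12 MHz; when the
		 * division is not exact the second branch picks a divider
		 * that keeps the card clock below the requested rate.
		 */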
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_output(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_output(host->board->vcc_pin, 1);
				break;
		}
	}
}
/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs)
{
	struct at91mci_host *host = devid;
	int completed = 0;

	unsigned int int_status;

	int_status = at91_mci_read(AT91_MCI_SR);
	pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
		int_status & at91_mci_read(AT91_MCI_IMR));

	if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000)
		completed = 1;

	int_status &= at91_mci_read(AT91_MCI_IMR);

	if (int_status & AT91_MCI_UNRE)
		pr_debug("MMC: Underrun error\n");
	if (int_status & AT91_MCI_OVRE)
		pr_debug("MMC: Overrun error\n");
	if (int_status & AT91_MCI_DTOE)
		pr_debug("MMC: Data timeout\n");
	if (int_status & AT91_MCI_DCRCE)
		pr_debug("MMC: CRC error in data\n");
	if (int_status & AT91_MCI_RTOE)
		pr_debug("MMC: Response timeout\n");
	if (int_status & AT91_MCI_RENDE)
		pr_debug("MMC: Response end bit error\n");
	if (int_status & AT91_MCI_RCRCE)
		pr_debug("MMC: Response CRC error\n");
	if (int_status & AT91_MCI_RDIRE)
		pr_debug("MMC: Response direction error\n");
	if (int_status & AT91_MCI_RINDE)
		pr_debug("MMC: Response index error\n");

	/* Only continue processing if no errors */
	if (!completed) {
		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX) {
			pr_debug("Transmit has ended\n");
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP) {
			pr_debug("Data transfer in progress\n");
		}

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
		}

		if (int_status & AT91_MCI_TXRDY) {
			pr_debug("Ready to transmit\n");
		}

		if (int_status & AT91_MCI_RXRDY) {
			pr_debug("Ready to receive\n");
		}

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}
	at91_mci_write(AT91_MCI_IDR, int_status);

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	}

	return IRQ_HANDLED;
}
static irqreturn_t at91_mmc_det_irq(int irq, void *_host, struct pt_regs *regs)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(AT91_MCI_SDCR, 0);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}
static struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
/*
 * Probe for the device
 */
static int at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	int ret;

	pr_debug("Probe MCI devices\n");
	at91_mci_disable();
	at91_mci_enable();

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
#ifdef SUPPORT_4WIRE
		mmc->caps |= MMC_CAP_4_BIT_DATA;
#else
		printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
#endif
	}
	/*
	 * Get Clock
	 */
	mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		return -ENODEV;
	}
	clk_enable(mci_clk);			/* Enable the peripheral clock */

	/*
	 * Allocate the MCI interrupt
	 */
	ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "Failed to request MCI interrupt\n");
		clk_disable(mci_clk);
		clk_put(mci_clk);
		mmc_free_host(mmc);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
				0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}
/*
 * Remove a device
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	mmc_remove_host(mmc);
	at91_mci_disable();
	free_irq(AT91_ID_MCI, host);
	mmc_free_host(mmc);

	clk_disable(mci_clk);			/* Disable the peripheral clock */
	clk_put(mci_clk);

	platform_set_drvdata(pdev, NULL);

	pr_debug("MCI Removed\n");

	return 0;
}
#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");