mtd: add LPC32xx MLC NAND driver
drivers/mtd/nand/lpc32xx_mlc.c
/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/amba/pl08x.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>
#define DRV_NAME "lpc32xx_mlc"
/**********************************************************************
 * MLC NAND controller register offsets
 **********************************************************************/

#define MLC_BUFF(x)			(x + 0x00000)
#define MLC_DATA(x)			(x + 0x08000)
#define MLC_CMD(x)			(x + 0x10000)
#define MLC_ADDR(x)			(x + 0x10004)
#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
#define MLC_RPR(x)			(x + 0x10018)
#define MLC_WPR(x)			(x + 0x1001C)
#define MLC_RUBP(x)			(x + 0x10020)
#define MLC_ROBP(x)			(x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
#define MLC_ICR(x)			(x + 0x10030)
#define MLC_TIME_REG(x)			(x + 0x10034)
#define MLC_IRQ_MR(x)			(x + 0x10038)
#define MLC_IRQ_SR(x)			(x + 0x1003C)
#define MLC_LOCK_PR(x)			(x + 0x10044)
#define MLC_ISR(x)			(x + 0x10048)
#define MLC_CEH(x)			(x + 0x1004C)
/**********************************************************************
 * MLC_CMD bit definitions
 **********************************************************************/
#define MLCCMD_RESET			0xFF
/**********************************************************************
 * MLC_ICR bit definitions
 **********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */
/**********************************************************************
 * MLC_TIME_REG bit definitions
 **********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)
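/*
 * Illustrative example (values assumed, not taken from a specific board):
 * lpc32xx_nand_setup() below divides the MLC clock rate by the configured
 * timing values, so those values behave as reciprocal durations in Hz. A
 * 12 ns write-low time would thus be passed as 83333333 (1 / 12 ns); with
 * a 104 MHz clock this programs MLCTIMEREG_WR_LOW(104000000 / 83333333),
 * i.e. one clock cycle. Note that some fields (e.g. TCEA_DELAY) get an
 * additional "+ 1" to round the cycle count up.
 */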
/**********************************************************************
 * MLC_IRQ_MR and MLC_IRQ_SR bit definitions
 **********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)
/**********************************************************************
 * MLC_LOCK_PR bit definitions
 **********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E
/**********************************************************************
 * MLC_ISR bit definitions
 **********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_NAND_READY		(1 << 0)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_ECC_READY		(1 << 2)
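/*
 * Note: the MLCISR_ERRORS field (bits 5:4) encodes the number of symbol
 * errors corrected by the decoder, offset by one; lpc32xx_read_page()
 * below recovers the count as ((mlc_isr >> 4) & 0x3) + 1. This reading
 * is inferred from the driver's own use of the field.
 */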
/**********************************************************************
 * MLC_CEH bit definitions
 **********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)
struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};
static struct nand_ecclayout lpc32xx_nand_oob = {
	.eccbytes = 40,
	.eccpos = {  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		    22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		    38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		    54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{ .offset = 0,
		  .length = 6, },
		{ .offset = 16,
		  .length = 6, },
		{ .offset = 32,
		  .length = 6, },
		{ .offset = 48,
		  .length = 6, },
	},
};
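/*
 * The layout above reflects the controller's view of a 2048+64 byte page:
 * each of the four 512-byte subpages owns a 16-byte OOB chunk, of which
 * bytes 0..5 are free for user data / bad block markers and bytes 6..15
 * hold the ECC bytes generated by the hardware encoder. Hence the totals:
 * 4 x 10 = 40 eccbytes and 4 x 6 = 24 free bytes.
 */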
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
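/*
 * The absolute BBT page numbers above appear sized for a 524288-page
 * device with 64-page erase blocks (e.g. 1 GiB of 2 KiB pages): 524224
 * and 524160 are the first pages of the last and second-to-last blocks,
 * respectively. This is an observation from the numbers, not a
 * datasheet-confirmed value.
 */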
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct clk		*clk;
	struct mtd_info		mtd;
	void __iomem		*io_base;
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;
	struct completion	comp_nand;
	struct completion	comp_controller;
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;
	uint8_t			*dummy_buf;
	int			mlcsubpages; /* number of 512-byte subpages */
};
/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case, only the
 * wait_for_completion() is measured (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;
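/*
 * For reference, a minimal sketch of the kind of measurement quoted above
 * (illustrative only, not part of the driver; assumes a mapped io_base):
 *
 *	struct timespec t1, t2;
 *	uint32_t tmp[128];	   128 x 32 bits = one 512-byte subpage
 *	int j;
 *
 *	getnstimeofday(&t1);
 *	for (j = 0; j < 128; j++)
 *		tmp[j] = readl(MLC_BUFF(host->io_base));
 *	getnstimeofday(&t2);
 *	pr_info("subpage read: %lld ns\n",
 *		timespec_to_ns(&t2) - timespec_to_ns(&t1));
 */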
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				  unsigned int ctrl)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}
/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}
static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}
static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(mtd, chip);
	lpc32xx_waitfunc_controller(mtd, chip);

	return NAND_STATUS_READY;
}
/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = chip->priv;
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required)
{
	struct lpc32xx_nand_host *host = chip->priv;
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}
	return 0;
}
static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const uint8_t *buf, int oob_required, int page,
			      int cached, int raw)
{
	int res;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
	res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	lpc32xx_waitfunc(mtd, chip);

	return res;
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct lpc32xx_nand_host *host = chip->priv;

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

	return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}
/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Always enabled! */
}
static bool lpc32xx_dma_filter(struct dma_chan *chan, void *param)
{
	struct pl08x_dma_chan *ch =
		container_of(chan, struct pl08x_dma_chan, chan);

	/* In LPC32xx's PL080 DMA wiring, the MLC NAND DMA signal is #12 */
	if (ch->cd->min_signal == 12)
		return true;
	return false;
}
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = &host->mtd;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, lpc32xx_dma_filter, NULL);
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
#ifdef CONFIG_OF
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *pdata;
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for platform data\n");
		return NULL;
	}

	of_property_read_u32(np, "nxp,tcea-delay", &pdata->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &pdata->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &pdata->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &pdata->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &pdata->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &pdata->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &pdata->wr_low);

	if (!pdata->tcea_delay || !pdata->busy_delay || !pdata->nand_ta ||
	    !pdata->rd_high || !pdata->rd_low || !pdata->wr_high ||
	    !pdata->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	pdata->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return pdata;
}
#else
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
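/*
 * Sketch of a device tree node as consumed by lpc32xx_parse_dt() above.
 * The unit address, timing values and GPIO specifier are illustrative
 * only; consult the lpc32xx-mlc binding document and board schematics
 * for authoritative values:
 *
 *	mlc: flash@200a8000 {
 *		compatible = "nxp,lpc3220-mlc";
 *		...
 *		nxp,tcea-delay = <333333333>;
 *		nxp,busy-delay = <10000000>;
 *		nxp,nand-ta = <18181818>;
 *		nxp,rd-high = <31250000>;
 *		nxp,rd-low = <45454545>;
 *		nxp,wr-high = <40000000>;
 *		nxp,wr-low = <83333333>;
 *		gpios = <&gpio 5 19 1>;	(write protect)
 *	};
 */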
/*
 * Probe for NAND controller
 */
static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;
	struct mtd_part_parser_data ppdata = {};

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate device structure\n");
		return -ENOMEM;
	}

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev, "No memory resource found for device!\n");
		return -ENXIO;
	}

	host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
	if (host->io_base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EIO;
	}
	host->io_base_phy = rc->start;

	mtd = &host->mtd;
	nand_chip = &host->nand_chip;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	else
		host->ncfg = pdev->dev.platform_data;
	if (!host->ncfg) {
		dev_err(&pdev->dev, "Missing platform data\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
	    gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	nand_chip->priv = host;		/* link the private data structures */
	mtd->priv = nand_chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_enable(host->clk);

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25; /* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->write_page = lpc32xx_write_page;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	/* bitflip_threshold's default is defined as ecc_strength anyway.
	 * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
	 * being 0, it causes bad block table scanning errors in
	 * nand_scan_tail(), so preparing it here. */
	mtd->bitflip_threshold = nand_chip->ecc.strength;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto err_exit2;
		}
	}

	/*
	 * Scan to find existence of the device and
	 * get the type of NAND device: SMALL block or LARGE block.
	 */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = mtd->writesize;
	nand_chip->ecc.layout = &lpc32xx_nand_oob;
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto err_exit3;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto err_exit3;
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * and scans for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit4;
	}

	mtd->name = DRV_NAME;

	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
					host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(mtd);

err_exit4:
	free_irq(host->irq, host);
err_exit3:
	if (use_dma)
		dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit1:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
/*
 * Remove NAND device
 */
static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}
#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_enable(host->clk);

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable(host->clk);
	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
#if defined(CONFIG_OF)
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
#endif
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= __devexit_p(lpc32xx_nand_remove),
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_nand_match),
	},
};

module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");