/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <mach/dma.h>
#include <plat/pxa3xx_nand.h>
#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
#define NAND_STOP_DELAY		(2 * HZ/50)
#define PAGE_CHUNK_SIZE		(2048)
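/*
 * Added note (not from the original source): PAGE_CHUNK_SIZE is the split
 * point between "small page" (512 byte) and "large page" (2048 byte) devices;
 * prepare_command_pool() compares info->page_size against it to pick the
 * command encoding and address-cycle layout.
 */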
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))
/* error code and state */

struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct nand_hw_control	controller;
	struct platform_device	*pdev;
	struct pxa3xx_nand_cmdset *cmdset;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;

	unsigned int		buf_start;
	unsigned int		buf_count;

	struct mtd_info		*mtd;
	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	size_t			data_buff_size;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* relate to the command */
	unsigned int		state;
	int			retcode;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	unsigned int		page_size;	/* page size of attached chip */
	unsigned int		data_size;	/* data size in FIFO */
	struct completion	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* timing calculated from setting */
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;
	uint32_t		reg_ndcr;

	/* calculated from pxa3xx_nand_flash data */
	size_t			oob_size;
	size_t			read_id_bytes;

	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
static int use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
/*
 * Default NAND flash controller configuration setup by the
 * bootloader. This configuration is used only when pdata->keep_config is set
 */
static struct pxa3xx_nand_cmdset default_cmdset = {
	.read_status	= 0x0070,
	.lock_status	= 0x007A,
};
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ "DEFAULT FLASH",      0,      0, 2048,  8,  8,    0, &timing[0] },
	{ "64MiB 16-bit",  0x46ec,     32,  512, 16, 16, 4096, &timing[1] },
	{ "256MiB 8-bit",  0xdaec,     64, 2048,  8,  8, 2048, &timing[1] },
	{ "4GiB 8-bit",    0xd7ec,    128, 4096,  8,  8, 8192, &timing[1] },
	{ "128MiB 8-bit",  0xa12c,     64, 2048,  8,  8, 1024, &timing[2] },
	{ "128MiB 16-bit", 0xb12c,     64, 2048, 16, 16, 1024, &timing[2] },
	{ "512MiB 8-bit",  0xdc2c,     64, 2048,  8,  8, 4096, &timing[2] },
	{ "512MiB 16-bit", 0xcc2c,     64, 2048, 16, 16, 4096, &timing[2] },
	{ "256MiB 16-bit", 0xba20,     64, 2048, 16, 16, 2048, &timing[3] },
};
/* Define a default flash type setting that serves for flash detection only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
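/*
 * Illustrative example (added comment, not from the original source): with a
 * hypothetical 156 MHz controller clock, ns2cycle(40, 156000000) evaluates to
 * 40 * 156 / 1000 = 6 cycles, since the integer math first reduces the clock
 * rate to MHz and then scales the nanosecond value down by 1000.
 */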
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = info->page_size;
	if (!oob_enable) {
		info->oob_size = 0;
		return;
	}

	switch (info->page_size) {
	case 2048:
		info->oob_size = (info->use_ecc) ? 40 : 64;
		break;
	case 512:
		info->oob_size = (info->use_ecc) ? 8 : 16;
		break;
	}
}
/*
 * NOTE: ND_RUN must be set first, and the command buffer written afterwards,
 * otherwise the controller does not start. We enable all interrupts at the
 * same time and let pxa3xx_nand_irq handle the rest of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;
	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
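/*
 * Added note (not from the original source): the NDCR interrupt bits are
 * mask bits, so enable_int() clears them to unmask an interrupt and
 * disable_int() sets them to mask it - hence the inverted-looking logic.
 */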
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
	}
}
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
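/*
 * Added note (not from the original source): the last three writes above
 * follow the usual PXA DMA bring-up - DRCMR maps the NAND data request line
 * onto the allocated channel, DDADR points the channel at the descriptor
 * built in start_data_dma(), and setting DCSR_RUN starts the transfer.
 * Completion is reported through pxa3xx_nand_data_dma_irq() below.
 */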
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0;

	status = nand_readl(info, NDSR);

	if (status & NDSR_DBERR)
		info->retcode = ERR_DBERR;
	if (status & NDSR_SBERR)
		info->retcode = ERR_SBERR;
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & NDSR_CS0_CMDD) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & NDSR_FLASH_RDY) {
		info->state = STATE_READY;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}
static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
		uint16_t column, int page_addr)
{
	uint16_t cmd;
	int addr_cycle, exec_cmd, ndcb0;
	struct mtd_info *mtd = info->mtd;

	ndcb0 = 0;
	addr_cycle = 0;
	exec_cmd = 1;

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->use_ecc = 0;
	info->retcode = ERR_NONE;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info);
		break;
	}

	info->ndcb0 = ndcb0;
	addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
			+ info->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		cmd = info->cmdset->read1;
		if (command == NAND_CMD_READOOB)
			info->buf_start = mtd->writesize + column;
		else
			info->buf_start = column;

		if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| addr_cycle
					| (cmd & NDCB0_CMD1_MASK);
		else
			info->ndcb0 |= NDCB0_CMD_TYPE(0)
					| NDCB0_DBC
					| addr_cycle
					| cmd;

	case NAND_CMD_SEQIN:
		/* small page addr setting */
		if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
					| (column & 0xFF);
		} else {
			info->ndcb1 = ((page_addr & 0xFFFF) << 16)
					| (column & 0xFFFF);

			if (page_addr & 0xFF0000)
				info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		}

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		cmd = info->cmdset->program;
		info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_AUTO_RS
				| NDCB0_ST_ROW_EN
				| NDCB0_DBC
				| cmd
				| addr_cycle;
		break;

	case NAND_CMD_READID:
		cmd = info->cmdset->read_id;
		info->buf_count = info->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| cmd;
		break;

	case NAND_CMD_STATUS:
		cmd = info->cmdset->read_status;
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| cmd;
		break;

	case NAND_CMD_ERASE1:
		cmd = info->cmdset->erase;
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| cmd;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;
		break;

	case NAND_CMD_RESET:
		cmd = info->cmdset->reset;
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| cmd;
		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		printk(KERN_ERR "pxa3xx-nand: non-supported"
			" command %x\n", command);
		break;
	}

	return exec_cmd;
}
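/*
 * Rough illustration (added comment, not from the original source): for a
 * NAND_CMD_READ0 on a large-page chip using 2 column + 3 row address cycles,
 * the function above builds ndcb0 = NDCB0_CMD_TYPE(0) | NDCB0_DBC |
 * NDCB0_ADDR_CYC(5) | cmd, i.e. 0x000D0000 OR-ed with the command bytes
 * taken from the cmdset, while ndcb1/ndcb2 carry the column and page address.
 */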
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
		int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	exec_cmd = prepare_command_pool(info, command, column, page_addr);
	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			printk(KERN_ERR "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
		info->state = STATE_IDLE;
	}
}
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int page)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_SBERR) {
		switch (info->use_ecc) {
		case 1:
			mtd->ecc_stats.corrected++;
			break;
		default:
			break;
		}
	} else if (info->retcode == ERR_DBERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such double bit errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return 0;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}
static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else {
			/*
			 * any error makes it return 0x01, which tells the
			 * caller that the erase or write failed
			 */
			return 0x01;
		}
	}

	return 0;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
		const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->cmdset = &default_cmdset;
	info->page_size = f->page_size;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);

	info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	/* set info fields needed to read id */
	info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->cmdset = &default_cmdset;

	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);

	return 0;
}
/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE
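/*
 * Illustrative check (added comment, not from the original source): with a
 * typical 4 KiB PAGE_SIZE, the 2112 bytes of page + OOB data and the
 * pxa_dma_desc placed at the end of the same page (see data_desc_offset in
 * pxa3xx_nand_init_buff() below) both fit comfortably in one page.
 */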
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd = info->mtd;
	struct nand_chip *chip = mtd->priv;

	/* use the common timing to make a try */
	pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
}
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id;
	uint64_t chipsize;
	int i, ret, num;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (!ret) {
		printk(KERN_INFO "There is no nand chip on cs 0!\n");
		return -EINVAL;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		printk(KERN_INFO "Detect a flash id %x\n", id);
	else {
		printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
		return -EINVAL;
	}

	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		printk(KERN_ERR "ERROR!! flash not defined!!!\n");
		return -EINVAL;
	}

	pxa3xx_nand_config_flash(info, f);
	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	/* calculate addressing information */
	info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
	info->oob_buff = info->data_buff + mtd->writesize;
	if ((mtd->size >> chip->page_shift) > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	mtd->name = mtd_names[0];
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = info->page_size;

	chip->options = (info->reg_ndcr & NDCR_DWIDTH_M) ? NAND_BUSWIDTH_16 : 0;
	chip->options |= NAND_NO_AUTOINCR;
	chip->options |= NAND_NO_READRDY;

	return nand_scan_tail(mtd);
}
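/*
 * Note (added for clarity, not from the original source): nand_scan_ident()
 * only identifies the chip against the ID table built above, so the driver
 * can still adjust ECC mode, bus-width options, etc. before nand_scan_tail()
 * completes the MTD setup. That is why pxa3xx_nand_scan() calls the two
 * halves of nand_scan() separately instead of nand_scan() itself.
 */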
static struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq;

	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
			GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return NULL;
	}

	info = (struct pxa3xx_nand_info *)(&mtd[1]);
	chip = (struct nand_chip *)(&mtd[1]);
	info->pdev = pdev;
	info->mtd = mtd;
	mtd->priv = info;
	mtd->owner = THIS_MODULE;

	chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
	chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
	chip->controller	= &info->controller;
	chip->waitfunc		= pxa3xx_nand_waitfunc;
	chip->select_chip	= pxa3xx_nand_select_chip;
	chip->dev_ready		= pxa3xx_nand_dev_ready;
	chip->cmdfunc		= pxa3xx_nand_cmdfunc;
	chip->read_word		= pxa3xx_nand_read_word;
	chip->read_byte		= pxa3xx_nand_read_byte;
	chip->read_buf		= pxa3xx_nand_read_buf;
	chip->write_buf		= pxa3xx_nand_write_buf;
	chip->verify_buf	= pxa3xx_nand_verify_buf;

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		goto fail_free_res;
	}
	info->mmio_phys = r->start;

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
			  pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);
	return info;

fail_free_buf:
	free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
fail_free_mtd:
	kfree(mtd);
	return NULL;
}
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct mtd_info *mtd = info->mtd;
	struct resource *r;
	int irq;

	platform_set_drvdata(pdev, NULL);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_writecombine(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);

	if (mtd) {
		mtd_device_unregister(mtd);
		kfree(mtd);
	}
	return 0;
}
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct mtd_partition *parts;
	int nr_parts;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	info = alloc_nand_resource(pdev);
	if (info == NULL)
		return -ENOMEM;

	if (pxa3xx_nand_scan(info->mtd)) {
		dev_err(&pdev->dev, "failed to scan nand\n");
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	nr_parts = parse_mtd_partitions(info->mtd, NULL, &parts, 0);

	if (nr_parts)
		return mtd_device_register(info->mtd, parts, nr_parts);

	return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
}
#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);

	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);

	nand_writel(info, NDTR0CS0, info->ndtr0cs0);
	nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	clk_enable(info->clk);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");