/*
 * Broadcom NAND core interface
 *
 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
29 #include <nand_core.h>
34 #define NANDFL_MSG(args) printf args
36 #define NANDFL_MSG(args)
39 #define NANDF_RETRIES 1000000
41 #define NANDF_SMALL_BADBLOCK_POS 5
42 #define NANDF_LARGE_BADBLOCK_POS 0
44 /* Private global state */
45 static hndnand_t nandcore
;
48 static int nandcore_poll(si_t
*sih
, nandregs_t
*nc
);
50 hndnand_t
*nandcore_init(si_t
*sih
);
51 static int nandcore_read(hndnand_t
*nfl
, uint64 offset
, uint len
, uchar
*buf
);
52 static int nandcore_write(hndnand_t
*nfl
, uint64 offset
, uint len
, const uchar
*buf
);
53 static int nandcore_erase(hndnand_t
*nfl
, uint64 offset
);
54 static int nandcore_checkbadb(hndnand_t
*nfl
, uint64 offset
);
55 static int nandcore_mark_badb(hndnand_t
*nfl
, uint64 offset
);
57 static int nandcore_read_oob(hndnand_t
*nfl
, uint64 addr
, uint8
*oob
);
59 static int nandcore_dev_ready(hndnand_t
*nfl
);
60 static int nandcore_select_chip(hndnand_t
*nfl
, int chip
);
61 static int nandcore_cmdfunc(hndnand_t
*nfl
, uint64 addr
, int cmd
);
62 static int nandcore_waitfunc(hndnand_t
*nfl
, int *status
);
63 static int nandcore_write_oob(hndnand_t
*nfl
, uint64 addr
, uint8
*oob
);
64 static int nandcore_read_page(hndnand_t
*nfl
, uint64 addr
, uint8
*buf
, uint8
*oob
, bool ecc
,
65 uint32
*herr
, uint32
*serr
);
66 static int nandcore_write_page(hndnand_t
*nfl
, uint64 addr
, const uint8
*buf
, uint8
*oob
, bool ecc
);
67 static int nandcore_cmd_read_byte(hndnand_t
*nfl
, int cmd
, int arg
);
70 /* Issue a nand flash command */
72 nandcore_cmd(osl_t
*osh
, nandregs_t
*nc
, uint opcode
)
74 W_REG(osh
, &nc
->cmd_start
, opcode
);
/* Find-first-set: returns the 1-based index of the least significant set bit
 * of x, or 0 when x == 0 (same contract as POSIX ffs()).
 */
static int
_nandcore_ffs(unsigned int x)
{
	int j;

	for (j = 0; j < 32; j++) {
		if (x & (1U << j))
			return (j + 1);
	}

	return 0;
}
90 _nandcore_buf_erased(const void *buf
, unsigned len
)
93 const uint32
*p
= buf
;
95 for (i
= 0; i
< (len
>> 2); i
++) {
96 if (p
[i
] != 0xffffffff)
104 _nandcore_read_page(hndnand_t
*nfl
, uint64 offset
, uint8
*buf
, uint8
*oob
, bool ecc
,
105 uint32
*herr
, uint32
*serr
)
108 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
109 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
111 unsigned spare_per_sec
;
113 unsigned hard_err_count
= 0;
114 uint32 mask
, reg
, *to
;
115 uint32 err_soft_reg
, err_hard_reg
;
116 int i
, ret
, sectorsize_shift
, sec_per_page_shift
;
120 mask
= nfl
->pagesize
- 1;
121 /* Check offset and length */
122 if ((offset
& mask
) != 0)
125 if ((((offset
+ nfl
->pagesize
) >> 20) > nfl
->size
) ||
126 ((((offset
+ nfl
->pagesize
) >> 20) == nfl
->size
) &&
127 (((offset
+ nfl
->pagesize
) & ((1 << 20) - 1)) != 0)))
130 osh
= si_osh(nfl
->sih
);
132 /* Reset ECC error stats */
133 err_hard_reg
= R_REG(osh
, &nc
->uncorr_error_count
);
134 err_soft_reg
= R_REG(osh
, &nc
->read_error_count
);
137 sectorsize_shift
= _nandcore_ffs(nfl
->sectorsize
) - 1;
138 sec_per_page_shift
= _nandcore_ffs(nfl
->pagesize
) - 1 - sectorsize_shift
;
140 spare_per_sec
= nfl
->oobsize
>> sec_per_page_shift
;
142 /* Set the page address for the following commands */
143 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
144 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (offset
>> 32)));
146 /* Enable ECC validation for ecc page reads */
148 OR_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
,
149 NANDAC_CS0_RD_ECC_EN
);
151 AND_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
,
152 ~NANDAC_CS0_RD_ECC_EN
);
154 /* Loop all sectors in page */
155 for (sector
= 0; sector
< (1 << sec_per_page_shift
); sector
++) {
158 /* Copy partial sectors sized by cache reg */
159 while (data_bytes
< (1 << sectorsize_shift
)) {
162 col
= data_bytes
+ (sector
<< sectorsize_shift
);
164 W_REG(osh
, &nc
->cmd_address
, offset
+ col
);
166 /* Issue command to read partial page */
167 nandcore_cmd(osh
, nc
, NANDCMD_PAGE_RD
);
169 /* Wait for the command to complete */
170 if ((ret
= nandcore_poll(nfl
->sih
, nc
)) < 0)
173 /* Set controller to Little Endian mode for copying */
174 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
176 if (data_bytes
== 0 && oob
) {
177 to
= (uint32
*)(oob
+ sector
* spare_per_sec
);
178 for (i
= 0; i
< spare_per_sec
; i
+= 4, to
++)
179 *to
= R_REG(osh
, &nc
->spare_area_read_ofs
[i
/4]);
183 to
= (uint32
*)(buf
+ col
);
184 for (i
= 0; i
< NFL_SECTOR_SIZE
; i
+= 4, to
++)
185 *to
= R_REG(osh
, &nc
->flash_cache
[i
/4]);
187 data_bytes
+= NFL_SECTOR_SIZE
;
189 /* Return to Big Endian mode for commands etc */
190 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
192 /* capture hard errors for each partial */
193 if (err_hard_reg
!= R_REG(osh
, &nc
->uncorr_error_count
)) {
194 int era
= (R_REG(osh
, &nc
->intfc_status
) & NANDIST_ERASED
);
195 if ((!era
) && (!_nandcore_buf_erased(buf
+col
, NFL_SECTOR_SIZE
)))
198 err_hard_reg
= R_REG(osh
, &nc
->uncorr_error_count
);
200 } /* while FlashCache buffer */
206 /* Report hard ECC errors */
208 *herr
= hard_err_count
;
210 /* Get ECC soft error stats */
212 *serr
= R_REG(osh
, &nc
->read_error_count
) - err_soft_reg
;
218 _nandcore_write_page(hndnand_t
*nfl
, uint64 offset
, const uint8
*buf
, uint8
*oob
, bool ecc
)
221 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
222 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
223 unsigned data_bytes
, spare_bytes
;
224 unsigned spare_per_sec
, sector
, num_sec
;
225 uint32 mask
, reg
, *from
;
226 int i
, ret
= 0, sectorsize_shift
, sec_per_page_shift
;
230 mask
= nfl
->pagesize
- 1;
231 /* Check offset and length */
232 if ((offset
& mask
) != 0)
235 if ((((offset
+ nfl
->pagesize
) >> 20) > nfl
->size
) ||
236 ((((offset
+ nfl
->pagesize
) >> 20) == nfl
->size
) &&
237 (((offset
+ nfl
->pagesize
) & ((1 << 20) - 1)) != 0)))
240 osh
= si_osh(nfl
->sih
);
243 sectorsize_shift
= _nandcore_ffs(nfl
->sectorsize
) - 1;
244 sec_per_page_shift
= _nandcore_ffs(nfl
->pagesize
) - 1 - sectorsize_shift
;
246 spare_per_sec
= nfl
->oobsize
>> sec_per_page_shift
;
249 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
251 /* Set the page address for the following commands */
252 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
253 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (offset
>> 32)));
255 /* Enable ECC generation for ecc page write, if requested */
257 OR_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
,
258 NANDAC_CS0_WR_ECC_EN
);
260 AND_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
,
261 ~NANDAC_CS0_WR_ECC_EN
);
264 num_sec
= 1 << sec_per_page_shift
;
266 /* Loop all sectors in page */
267 for (sector
= 0; sector
< num_sec
; sector
++) {
270 /* Copy partial sectors sized by cache reg */
271 while (data_bytes
< (1 << sectorsize_shift
)) {
274 col
= data_bytes
+ (sector
<< sectorsize_shift
);
276 /* Set address of 512-byte sub-page */
277 W_REG(osh
, &nc
->cmd_address
, offset
+ col
);
279 /* Set controller to Little Endian mode for copying */
280 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
282 /* Set spare area is written at each sector start */
283 if (data_bytes
== 0) {
285 from
= (uint32
*)(oob
+ spare_bytes
);
286 for (i
= 0; i
< spare_per_sec
; i
+= 4, from
++)
287 W_REG(osh
, &nc
->spare_area_write_ofs
[i
/4], *from
);
290 /* Write 0xffffffff to spare_area_write_ofs register
291 * to prevent old spare_area_write_ofs vale write
292 * when we issue NANDCMD_PAGE_PROG.
294 for (i
= 0; i
< spare_per_sec
; i
+= 4)
295 W_REG(osh
, &nc
->spare_area_write_ofs
[i
/4],
299 spare_bytes
+= spare_per_sec
;
302 /* Copy sub-page data */
303 from
= (uint32
*)(buf
+ col
);
304 for (i
= 0; i
< NFL_SECTOR_SIZE
; i
+= 4, from
++)
305 W_REG(osh
, &nc
->flash_cache
[i
/4], *from
);
307 data_bytes
+= NFL_SECTOR_SIZE
;
309 /* Return to Big Endian mode for commands etc */
310 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
312 /* Push data into internal cache */
313 nandcore_cmd(osh
, nc
, NANDCMD_PAGE_PROG
);
315 ret
= nandcore_poll(nfl
->sih
, nc
);
323 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
328 static bool firsttime
= TRUE
;
331 nandcore_check_id(uint8
*id
)
339 case NFL_VENDOR_NUMONYX
:
342 case NFL_VENDOR_MICRON
:
345 case NFL_VENDOR_TOSHIBA
:
348 case NFL_VENDOR_HYNIX
:
351 case NFL_VENDOR_SAMSUNG
:
354 case NFL_VENDOR_ESMT
:
357 case NFL_VENDOR_MXIC
:
361 printf("No NAND flash type found\n");
368 /* Initialize nand flash access */
370 nandcore_init(si_t
*sih
)
383 /* Only support chipcommon revision == 42 for now */
384 if (sih
->ccrev
!= 42)
387 if ((nc
= (nandregs_t
*)si_setcore(sih
, NS_NAND_CORE_ID
, 0)) == NULL
)
390 if (R_REG(NULL
, &nc
->flash_device_id
) == 0)
393 if (!firsttime
&& nandcore
.size
)
397 bzero(&nandcore
, sizeof(nandcore
));
400 nandcore
.core
= (void *)nc
;
401 nandcore
.wrap
= si_wrapperregs(sih
);
402 nandcore
.read
= nandcore_read
;
403 nandcore
.write
= nandcore_write
;
404 nandcore
.erase
= nandcore_erase
;
405 nandcore
.checkbadb
= nandcore_checkbadb
;
406 nandcore
.markbadb
= nandcore_mark_badb
;
408 nandcore
.read_oob
= nandcore_read_oob
;
410 nandcore
.dev_ready
= nandcore_dev_ready
;
411 nandcore
.select_chip
= nandcore_select_chip
;
412 nandcore
.cmdfunc
= nandcore_cmdfunc
;
413 nandcore
.waitfunc
= nandcore_waitfunc
;
414 nandcore
.write_oob
= nandcore_write_oob
;
415 nandcore
.read_page
= nandcore_read_page
;
416 nandcore
.write_page
= nandcore_write_page
;
417 nandcore
.cmd_read_byte
= nandcore_cmd_read_byte
;
420 nandcore_cmd(osh
, nc
, NANDCMD_ID_RD
);
421 if (nandcore_poll(sih
, nc
) < 0) {
425 ai
= (aidmp_t
*)nandcore
.wrap
;
427 /* Toggle as little endian */
428 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
430 id
= R_REG(osh
, &nc
->flash_device_id
);
431 id2
= R_REG(osh
, &nc
->flash_device_id_ext
);
433 /* Toggle as big endian */
434 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
436 for (i
= 0; i
< 5; i
++) {
438 nandcore
.id
[i
] = (id
>> (8*i
)) & 0xff;
440 nandcore
.id
[i
] = id2
& 0xff;
443 name
= nandcore_check_id(nandcore
.id
);
446 nandcore
.type
= nandcore
.id
[0];
448 ncf
= R_REG(osh
, &nc
->config_cs0
);
449 /* Page size (# of bytes) */
450 val
= (ncf
& NANDCF_CS0_PAGE_SIZE_MASK
) >> NANDCF_CS0_PAGE_SIZE_SHIFT
;
453 nandcore
.pagesize
= 512;
456 nandcore
.pagesize
= (1 << 10) * 2;
459 nandcore
.pagesize
= (1 << 10) * 4;
462 nandcore
.pagesize
= (1 << 10) * 8;
465 /* Block size (# of bytes) */
466 val
= (ncf
& NANDCF_CS0_BLOCK_SIZE_MASK
) >> NANDCF_CS0_BLOCK_SIZE_SHIFT
;
469 nandcore
.blocksize
= (1 << 10) * 8;
472 nandcore
.blocksize
= (1 << 10) * 16;
475 nandcore
.blocksize
= (1 << 10) * 128;
478 nandcore
.blocksize
= (1 << 10) * 256;
481 nandcore
.blocksize
= (1 << 10) * 512;
484 nandcore
.blocksize
= (1 << 10) * 1024;
487 nandcore
.blocksize
= (1 << 10) * 2048;
490 printf("Unknown block size\n");
493 /* NAND flash size in MBytes */
494 val
= (ncf
& NANDCF_CS0_DEVICE_SIZE_MASK
) >> NANDCF_CS0_DEVICE_SIZE_SHIFT
;
495 nandcore
.size
= (1 << val
) * 4;
497 /* Get Device I/O data bus width */
498 if (ncf
& NANDCF_CS0_DEVICE_WIDTH
)
501 /* Spare size and Spare per sector (# of bytes) */
502 acc_control
= R_REG(osh
, &nc
->acc_control_cs0
);
503 if (acc_control
& NANDAC_CS0_SECTOR_SIZE_1K
) {
504 printf("Pin strapping error. Sector size 1K hasn't supported yet\n");
508 /* Check conflict between 1K sector and page size */
509 if (acc_control
& NANDAC_CS0_SECTOR_SIZE_1K
) {
510 /* NOTE: 1K sector is not yet supported. */
511 nandcore
.sectorsize
= 1024;
514 nandcore
.sectorsize
= 512;
516 if (nandcore
.sectorsize
== 1024 && nandcore
.pagesize
== 512) {
517 printf("Pin strapping error. Page size is 512, but sector size is 1024\n");
522 nandcore
.sparesize
= acc_control
& NANDAC_CS0_SPARE_AREA_SIZE
;
525 nandcore
.oobsize
= nandcore
.sparesize
* (nandcore
.pagesize
/ NFL_SECTOR_SIZE
);
528 nandcore
.ecclevel
= (acc_control
& NANDAC_CS0_ECC_LEVEL_MASK
) >> NANDAC_CS0_ECC_LEVEL_SHIFT
;
529 if (nandcore
.sectorsize
== 1024)
530 nandcore
.ecclevel
*= 2;
532 nandcore
.numblocks
= (nandcore
.size
* (1 << 10)) / (nandcore
.blocksize
>> 10);
534 printf("Found a %s NAND flash:\n", name
);
535 printf("Total size: %uMB\n", nandcore
.size
);
536 printf("Block size: %uKB\n", (nandcore
.blocksize
>> 10));
537 printf("Page Size: %uB\n", nandcore
.pagesize
);
538 printf("OOB Size: %uB\n", nandcore
.oobsize
);
539 printf("Sector size: %uB\n", nandcore
.sectorsize
);
540 printf("Spare size: %uB\n", nandcore
.sparesize
);
541 printf("ECC level: %u-bit\n", nandcore
.ecclevel
);
542 printf("Device ID: 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x\n",
543 nandcore
.id
[0], nandcore
.id
[1], nandcore
.id
[2],
544 nandcore
.id
[3], nandcore
.id
[4]);
549 nandcore
.phybase
= SI_NS_NANDFLASH
;
550 nandcore
.base
= (uint32
)REG_MAP(SI_NS_NANDFLASH
, SI_FLASH_WINDOW
);
553 /* Configuration readback */
554 printf("R_REG(cs_nand_select) = 0x%08x\n", R_REG(osh
, &nc
->cs_nand_select
));
555 printf("R_REG(config_cs0) = 0x%08x\n", R_REG(osh
, &nc
->config_cs0
));
556 printf("R_REG(acc_control_cs0) = 0x%08x\n", R_REG(osh
, &nc
->acc_control_cs0
));
559 return nandcore
.size
? &nandcore
: NULL
;
562 /* Read len bytes starting at offset into buf. Returns number of bytes read. */
564 nandcore_read(hndnand_t
*nfl
, uint64 offset
, uint len
, uchar
*buf
)
569 uint32 herr
= 0, serr
= 0;
572 osh
= si_osh(nfl
->sih
);
578 _nandcore_read_page(nfl
, offset
, to
, NULL
, TRUE
, &herr
, &serr
);
580 res
-= nfl
->pagesize
;
581 offset
+= nfl
->pagesize
;
588 /* Poll for command completion. Returns zero when complete. */
590 nandcore_poll(si_t
*sih
, nandregs_t
*nc
)
599 pollmask
= NANDIST_CTRL_READY
| NANDIST_FLASH_READY
;
600 for (i
= 0; i
< NANDF_RETRIES
; i
++) {
601 if ((R_REG(osh
, &nc
->intfc_status
) & pollmask
) == pollmask
) {
606 printf("%s: not ready\n", __FUNCTION__
);
610 /* Write len bytes starting at offset into buf. Returns number of bytes
614 nandcore_write(hndnand_t
*nfl
, uint64 offset
, uint len
, const uchar
*buf
)
622 osh
= si_osh(nfl
->sih
);
628 ret
= _nandcore_write_page(nfl
, offset
, from
, NULL
, TRUE
);
632 res
-= nfl
->pagesize
;
633 offset
+= nfl
->pagesize
;
634 from
+= nfl
->pagesize
;
643 /* Erase a region. Returns number of bytes scheduled for erasure.
644 * Caller should poll for completion.
647 nandcore_erase(hndnand_t
*nfl
, uint64 offset
)
649 si_t
*sih
= nfl
->sih
;
650 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
659 if ((offset
>> 20) >= nfl
->size
)
661 if ((offset
& (nfl
->blocksize
- 1)) != 0) {
666 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
668 /* Set the block address for the following commands */
669 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
670 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (offset
>> 32)));
672 W_REG(osh
, &nc
->cmd_address
, offset
);
673 nandcore_cmd(osh
, nc
, NANDCMD_BLOCK_ERASE
);
674 if (nandcore_poll(sih
, nc
) < 0)
678 W_REG(osh
, &nc
->cmd_start
, NANDCMD_STATUS_RD
);
679 if (nandcore_poll(sih
, nc
) < 0)
682 status
= R_REG(osh
, &nc
->intfc_status
) & NANDIST_STATUS
;
689 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
695 nandcore_checkbadb(hndnand_t
*nfl
, uint64 offset
)
697 si_t
*sih
= nfl
->sih
;
698 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
699 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
703 uint32 nand_intfc_status
;
710 if ((offset
>> 20) >= nfl
->size
)
712 if ((offset
& (nfl
->blocksize
- 1)) != 0) {
716 /* Set the block address for the following commands */
717 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
718 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (offset
>> 32)));
720 for (i
= 0; i
< 2; i
++) {
721 off
= offset
+ (nfl
->pagesize
* i
);
722 W_REG(osh
, &nc
->cmd_address
, off
);
723 nandcore_cmd(osh
, nc
, NANDCMD_SPARE_RD
);
724 if (nandcore_poll(sih
, nc
) < 0) {
728 nand_intfc_status
= R_REG(osh
, &nc
->intfc_status
) & NANDIST_SPARE_VALID
;
729 if (nand_intfc_status
!= NANDIST_SPARE_VALID
) {
732 printf("%s: Spare is not valid\n", __FUNCTION__
);
737 /* Toggle as little endian */
738 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
740 if ((R_REG(osh
, &nc
->spare_area_read_ofs
[0]) & 0xff) != 0xff) {
743 printf("%s: Bad Block (0x%llx)\n", __FUNCTION__
, offset
);
747 /* Toggle as big endian */
748 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
759 nandcore_mark_badb(hndnand_t
*nfl
, uint64 offset
)
761 si_t
*sih
= nfl
->sih
;
762 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
763 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
772 if ((offset
>> 20) >= nfl
->size
)
774 if ((offset
& (nfl
->blocksize
- 1)) != 0) {
779 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
781 /* Set the block address for the following commands */
782 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
783 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (offset
>> 32)));
786 W_REG(osh
, &nc
->cmd_address
, offset
);
787 nandcore_cmd(osh
, nc
, NANDCMD_BLOCK_ERASE
);
788 if (nandcore_poll(sih
, nc
) < 0) {
790 /* Still go through the spare area write */
795 * Enable partial page programming and disable ECC checkbit generation
796 * for PROGRAM_SPARE_AREA
798 reg
= R_REG(osh
, &nc
->acc_control_cs0
);
799 reg
|= NANDAC_CS0_PARTIAL_PAGE_EN
;
800 reg
|= NANDAC_CS0_FAST_PGM_RDIN
;
801 reg
&= ~NANDAC_CS0_WR_ECC_EN
;
802 W_REG(osh
, &nc
->acc_control_cs0
, reg
);
804 for (i
= 0; i
< 2; i
++) {
805 off
= offset
+ (nfl
->pagesize
* i
);
806 W_REG(osh
, &nc
->cmd_address
, off
);
808 /* Toggle as little endian */
809 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
811 W_REG(osh
, &nc
->spare_area_write_ofs
[0], 0);
812 W_REG(osh
, &nc
->spare_area_write_ofs
[1], 0);
813 W_REG(osh
, &nc
->spare_area_write_ofs
[2], 0);
814 W_REG(osh
, &nc
->spare_area_write_ofs
[3], 0);
816 /* Toggle as big endian */
817 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
819 nandcore_cmd(osh
, nc
, NANDCMD_SPARE_PROG
);
820 if (nandcore_poll(sih
, nc
) < 0) {
823 printf("%s: Spare program is not ready\n", __FUNCTION__
);
830 /* Restore the default value for spare area write registers */
831 W_REG(osh
, &nc
->spare_area_write_ofs
[0], 0xffffffff);
832 W_REG(osh
, &nc
->spare_area_write_ofs
[1], 0xffffffff);
833 W_REG(osh
, &nc
->spare_area_write_ofs
[2], 0xffffffff);
834 W_REG(osh
, &nc
->spare_area_write_ofs
[3], 0xffffffff);
837 * Disable partial page programming and enable ECC checkbit generation
838 * for PROGRAM_SPARE_AREA
840 reg
= R_REG(osh
, &nc
->acc_control_cs0
);
841 reg
&= ~NANDAC_CS0_PARTIAL_PAGE_EN
;
842 reg
&= ~NANDAC_CS0_FAST_PGM_RDIN
;
843 reg
|= NANDAC_CS0_WR_ECC_EN
;
844 W_REG(osh
, &nc
->acc_control_cs0
, reg
);
847 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
854 /* Functions support brcmnand driver */
856 _nandcore_set_cmd_address(hndnand_t
*nfl
, uint64 addr
)
860 si_t
*sih
= nfl
->sih
;
861 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
866 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
867 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (addr
>> 32)));
868 W_REG(osh
, &nc
->cmd_address
, addr
);
872 nandcore_dev_ready(hndnand_t
*nfl
)
874 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
878 return (R_REG(si_osh(nfl
->sih
), &ai
->iostatus
) & NAND_RO_CTRL_READY
);
882 nandcore_select_chip(hndnand_t
*nfl
, int chip
)
886 si_t
*sih
= nfl
->sih
;
887 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
892 reg
= R_REG(osh
, &nc
->cmd_ext_address
);
893 reg
&= ~NANDCMD_CS_SEL_MASK
;
894 reg
|= (chip
<< NANDCMD_CS_SEL_SHIFT
);
895 W_REG(osh
, &nc
->cmd_ext_address
, reg
);
897 /* Set active chip index */
904 nandcore_cmdfunc(hndnand_t
*nfl
, uint64 addr
, int cmd
)
908 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
911 osh
= si_osh(nfl
->sih
);
915 _nandcore_set_cmd_address(nfl
, addr
);
919 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
920 nandcore_cmd(osh
, nc
, NANDCMD_BLOCK_ERASE
);
921 ret
= nandcore_waitfunc(nfl
, NULL
);
923 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
926 _nandcore_set_cmd_address(nfl
, addr
);
929 _nandcore_set_cmd_address(nfl
, addr
);
930 nandcore_cmd(osh
, nc
, NANDCMD_PAGE_RD
);
931 ret
= nandcore_waitfunc(nfl
, NULL
);
934 nandcore_cmd(osh
, nc
, NANDCMD_FLASH_RESET
);
935 ret
= nandcore_waitfunc(nfl
, NULL
);
938 nandcore_cmd(osh
, nc
, NANDCMD_ID_RD
);
939 ret
= nandcore_waitfunc(nfl
, NULL
);
943 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
944 nandcore_cmd(osh
, nc
, NANDCMD_STATUS_RD
);
945 ret
= nandcore_waitfunc(nfl
, NULL
);
947 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
949 case CMDFUNC_READOOB
:
953 printf("%s: Unknow command 0x%x\n", __FUNCTION__
, cmd
);
962 /* Return intfc_status FLASH_STATUS if CTRL/FLASH is ready otherwise -1 */
964 nandcore_waitfunc(hndnand_t
*nfl
, int *status
)
968 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
971 osh
= si_osh(nfl
->sih
);
973 ret
= nandcore_poll(nfl
->sih
, nc
);
974 if (ret
== 0 && status
)
975 *status
= R_REG(osh
, &nc
->intfc_status
) & NANDIST_STATUS
;
/* Local find-first-set used by the OOB helpers below: 1-based index of the
 * least significant set bit, 0 when x == 0.
 * NOTE(review): a conditional guard (e.g. #ifndef for environments providing
 * ffs()) may have existed around this definition in the original — confirm.
 */
static int
ffs(unsigned int x)
{
	int j;

	for (j = 0; j < 32; j++) {
		if (x & (1U << j))
			return (j + 1);
	}

	return 0;
}
997 nandcore_read_oob(hndnand_t
*nfl
, uint64 addr
, uint8
*oob
)
1000 si_t
*sih
= nfl
->sih
;
1001 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
1002 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
1004 unsigned spare_per_sec
, sector
;
1005 int i
, sectorsize_shift
, sec_per_page_shift
;
1010 /* Set the page address for the following commands */
1011 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
1012 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (addr
>> 32)));
1015 sectorsize_shift
= ffs(nfl
->sectorsize
) - 1;
1016 sec_per_page_shift
= ffs(nfl
->pagesize
) - 1 - sectorsize_shift
;
1018 spare_per_sec
= nfl
->oobsize
>> sec_per_page_shift
;
1020 /* Disable ECC validation for spare area reads */
1021 AND_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
,
1022 ~NANDAC_CS0_RD_ECC_EN
);
1024 /* Loop all sectors in page */
1025 for (sector
= 0; sector
< (1 << sec_per_page_shift
); sector
++) {
1028 col
= (sector
<< sectorsize_shift
);
1030 /* Issue command to read partial page */
1031 W_REG(osh
, &nc
->cmd_address
, addr
+ col
);
1033 nandcore_cmd(osh
, nc
, NANDCMD_SPARE_RD
);
1035 /* Wait for the command to complete */
1036 if (nandcore_poll(sih
, nc
))
1039 if (!(R_REG(osh
, &nc
->intfc_status
) & NANDIST_SPARE_VALID
)) {
1040 printf("%s: data not valid\n", __FUNCTION__
);
1044 /* Set controller to Little Endian mode for copying */
1045 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
1047 to
= (uint32
*)(oob
+ sector
* spare_per_sec
);
1048 for (i
= 0; i
< spare_per_sec
; i
+= 4, to
++)
1049 *to
= R_REG(osh
, &nc
->spare_area_read_ofs
[i
/4]);
1051 /* Return to Big Endian mode for commands etc */
1052 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
1060 nandcore_write_oob(hndnand_t
*nfl
, uint64 addr
, uint8
*oob
)
1063 si_t
*sih
= nfl
->sih
;
1064 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
1065 aidmp_t
*ai
= (aidmp_t
*)nfl
->wrap
;
1067 unsigned spare_per_sec
, sector
, num_sec
;
1068 int i
, sectorsize_shift
, sec_per_page_shift
;
1075 AND_REG(osh
, &nc
->cs_nand_select
, ~NANDCSEL_NAND_WP
);
1078 * Enable partial page programming and disable ECC checkbit generation
1079 * for PROGRAM_SPARE_AREA
1081 reg
= R_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
);
1082 reg
|= NANDAC_CS0_PARTIAL_PAGE_EN
;
1083 reg
|= NANDAC_CS0_FAST_PGM_RDIN
;
1084 reg
&= ~NANDAC_CS0_WR_ECC_EN
;
1085 W_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
, reg
);
1088 sectorsize_shift
= _nandcore_ffs(nfl
->sectorsize
) - 1;
1089 sec_per_page_shift
= _nandcore_ffs(nfl
->pagesize
) - 1 - sectorsize_shift
;
1091 spare_per_sec
= nfl
->oobsize
>> sec_per_page_shift
;
1093 /* Set the page address for the following commands */
1094 reg
= (R_REG(osh
, &nc
->cmd_ext_address
) & ~NANDCMD_EXT_ADDR_MASK
);
1095 W_REG(osh
, &nc
->cmd_ext_address
, (reg
| (addr
>> 32)));
1097 num_sec
= 1 << sec_per_page_shift
;
1099 /* Loop all sectors in page */
1100 for (sector
= 0; sector
< num_sec
; sector
++) {
1103 /* Spare area accessed by the data sector offset */
1104 col
= (sector
<< sectorsize_shift
);
1106 W_REG(osh
, &nc
->cmd_address
, addr
+ col
);
1108 /* Set controller to Little Endian mode for copying */
1109 OR_REG(osh
, &ai
->ioctrl
, NAND_APB_LITTLE_ENDIAN
);
1111 from
= (uint32
*)(oob
+ sector
* spare_per_sec
);
1112 for (i
= 0; i
< spare_per_sec
; i
+= 4, from
++)
1113 W_REG(osh
, &nc
->spare_area_write_ofs
[i
/4], *from
);
1115 /* Return to Big Endian mode for commands etc */
1116 AND_REG(osh
, &ai
->ioctrl
, ~NAND_APB_LITTLE_ENDIAN
);
1118 /* Push spare bytes into internal buffer, last goes to flash */
1119 nandcore_cmd(osh
, nc
, NANDCMD_SPARE_PROG
);
1121 if (nandcore_poll(sih
, nc
)) {
1129 * Disable partial page programming and enable ECC checkbit generation
1130 * for PROGRAM_SPARE_AREA
1132 reg
= R_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
);
1133 reg
&= ~NANDAC_CS0_PARTIAL_PAGE_EN
;
1134 reg
&= ~NANDAC_CS0_FAST_PGM_RDIN
;
1135 reg
|= NANDAC_CS0_WR_ECC_EN
;
1136 W_REG(osh
, nfl
->chipidx
? &nc
->acc_control_cs1
: &nc
->acc_control_cs0
, reg
);
1139 OR_REG(osh
, &nc
->cs_nand_select
, NANDCSEL_NAND_WP
);
1145 nandcore_read_page(hndnand_t
*nfl
, uint64 addr
, uint8
*buf
, uint8
*oob
, bool ecc
,
1146 uint32
*herr
, uint32
*serr
)
1148 return _nandcore_read_page(nfl
, addr
, buf
, oob
, ecc
, herr
, serr
);
1152 nandcore_write_page(hndnand_t
*nfl
, uint64 addr
, const uint8
*buf
, uint8
*oob
, bool ecc
)
1154 return _nandcore_write_page(nfl
, addr
, buf
, oob
, ecc
);
1158 nandcore_cmd_read_byte(hndnand_t
*nfl
, int cmd
, int arg
)
1162 nandregs_t
*nc
= (nandregs_t
*)nfl
->core
;
1165 osh
= si_osh(nfl
->sih
);
1168 case CMDFUNC_READID
:
1169 return R_REG(osh
, id_ext
? &nc
->flash_device_id_ext
: &nc
->flash_device_id
);
1170 case CMDFUNC_STATUS
:
1171 return (R_REG(osh
, &nc
->intfc_status
) & NANDIST_STATUS
);
1174 printf("%s: Unknow command 0x%x\n", __FUNCTION__
, cmd
);