2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/version.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/platform_device.h>
20 #include <linux/err.h>
21 #include <linux/completion.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/ioport.h>
26 #include <linux/bug.h>
27 #include <linux/kernel.h>
28 #include <linux/bitops.h>
30 #include <linux/mtd/mtd.h>
31 #include <linux/mtd/nand.h>
32 #include <linux/mtd/partitions.h>
34 #include <linux/of_mtd.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/log2.h>
43 * This flag controls if WP stays on between erase/write commands to mitigate
44 * flash corruption due to power glitches. Values:
45 * 0: NAND_WP is not used or not available
46 * 1: NAND_WP is set by default, cleared for erase/write operations
47 * 2: NAND_WP is always cleared
50 module_param(wp_on
, int, 0444);
52 /***********************************************************************
54 ***********************************************************************/
56 #define DRV_NAME "brcmnand"
59 #define CMD_PAGE_READ 0x01
60 #define CMD_SPARE_AREA_READ 0x02
61 #define CMD_STATUS_READ 0x03
62 #define CMD_PROGRAM_PAGE 0x04
63 #define CMD_PROGRAM_SPARE_AREA 0x05
64 #define CMD_COPY_BACK 0x06
65 #define CMD_DEVICE_ID_READ 0x07
66 #define CMD_BLOCK_ERASE 0x08
67 #define CMD_FLASH_RESET 0x09
68 #define CMD_BLOCKS_LOCK 0x0a
69 #define CMD_BLOCKS_LOCK_DOWN 0x0b
70 #define CMD_BLOCKS_UNLOCK 0x0c
71 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
72 #define CMD_PARAMETER_READ 0x0e
73 #define CMD_PARAMETER_CHANGE_COL 0x0f
74 #define CMD_LOW_LEVEL_OP 0x10
76 struct brcm_nand_dma_desc
{
91 /* Bitfields for brcm_nand_dma_desc::status_valid */
92 #define FLASH_DMA_ECC_ERROR (1 << 8)
93 #define FLASH_DMA_CORR_ERROR (1 << 9)
95 /* 512B flash cache in the NAND controller HW */
98 #define FC_WORDS (FC_BYTES >> 2)
100 #define BRCMNAND_MIN_PAGESIZE 512
101 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
104 /* Controller feature flags */
106 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
107 BRCMNAND_HAS_PREFETCH
= BIT(1),
108 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
109 BRCMNAND_HAS_WP
= BIT(3),
112 struct brcmnand_controller
{
114 struct nand_hw_control controller
;
115 void __iomem
*nand_base
;
116 void __iomem
*nand_fc
; /* flash cache */
117 void __iomem
*flash_dma_base
;
119 unsigned int dma_irq
;
122 /* Some SoCs provide custom interrupt status register(s) */
123 struct brcmnand_soc
*soc
;
127 struct completion done
;
128 struct completion dma_done
;
130 /* List of NAND hosts (one for each chip-select) */
131 struct list_head host_list
;
133 struct brcm_nand_dma_desc
*dma_desc
;
136 /* in-memory cache of the FLASH_CACHE, used only for some commands */
137 u8 flash_cache
[FC_BYTES
];
139 /* Controller revision details */
140 const u16
*reg_offsets
;
141 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
142 const u8
*cs_offsets
; /* within each chip-select */
143 const u8
*cs0_offsets
; /* within CS0, if different */
144 unsigned int max_block_size
;
145 const unsigned int *block_sizes
;
146 unsigned int max_page_size
;
147 const unsigned int *page_sizes
;
148 unsigned int max_oob
;
151 /* for low-power standby/resume only */
152 u32 nand_cs_nand_select
;
153 u32 nand_cs_nand_xor
;
154 u32 corr_stat_threshold
;
158 struct brcmnand_cfg
{
160 unsigned int block_size
;
161 unsigned int page_size
;
162 unsigned int spare_area_size
;
163 unsigned int device_width
;
164 unsigned int col_adr_bytes
;
165 unsigned int blk_adr_bytes
;
166 unsigned int ful_adr_bytes
;
167 unsigned int sector_size_1k
;
168 unsigned int ecc_level
;
169 /* use for low-power standby/resume only */
177 struct brcmnand_host
{
178 struct list_head node
;
180 struct nand_chip chip
;
182 struct platform_device
*pdev
;
185 unsigned int last_cmd
;
186 unsigned int last_byte
;
188 struct brcmnand_cfg hwcfg
;
189 struct brcmnand_controller
*ctrl
;
193 BRCMNAND_CMD_START
= 0,
194 BRCMNAND_CMD_EXT_ADDRESS
,
195 BRCMNAND_CMD_ADDRESS
,
196 BRCMNAND_INTFC_STATUS
,
201 BRCMNAND_CS1_BASE
, /* CS1 regs, if non-contiguous */
202 BRCMNAND_CORR_THRESHOLD
,
203 BRCMNAND_CORR_THRESHOLD_EXT
,
204 BRCMNAND_UNCORR_COUNT
,
206 BRCMNAND_CORR_EXT_ADDR
,
208 BRCMNAND_UNCORR_EXT_ADDR
,
209 BRCMNAND_UNCORR_ADDR
,
214 BRCMNAND_OOB_READ_BASE
,
215 BRCMNAND_OOB_READ_10_BASE
, /* offset 0x10, if non-contiguous */
216 BRCMNAND_OOB_WRITE_BASE
,
217 BRCMNAND_OOB_WRITE_10_BASE
, /* offset 0x10, if non-contiguous */
222 static const u16 brcmnand_regs_v40
[] = {
223 [BRCMNAND_CMD_START
] = 0x04,
224 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
225 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
226 [BRCMNAND_INTFC_STATUS
] = 0x6c,
227 [BRCMNAND_CS_SELECT
] = 0x14,
228 [BRCMNAND_CS_XOR
] = 0x18,
229 [BRCMNAND_LL_OP
] = 0x178,
230 [BRCMNAND_CS0_BASE
] = 0x40,
231 [BRCMNAND_CS1_BASE
] = 0xd0,
232 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
233 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
234 [BRCMNAND_UNCORR_COUNT
] = 0,
235 [BRCMNAND_CORR_COUNT
] = 0,
236 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
237 [BRCMNAND_CORR_ADDR
] = 0x74,
238 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
239 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
240 [BRCMNAND_SEMAPHORE
] = 0x58,
241 [BRCMNAND_ID
] = 0x60,
242 [BRCMNAND_ID_EXT
] = 0x64,
243 [BRCMNAND_LL_RDATA
] = 0x17c,
244 [BRCMNAND_OOB_READ_BASE
] = 0x20,
245 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
246 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
247 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
248 [BRCMNAND_FC_BASE
] = 0x200,
252 static const u16 brcmnand_regs_v50
[] = {
253 [BRCMNAND_CMD_START
] = 0x04,
254 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
255 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
256 [BRCMNAND_INTFC_STATUS
] = 0x6c,
257 [BRCMNAND_CS_SELECT
] = 0x14,
258 [BRCMNAND_CS_XOR
] = 0x18,
259 [BRCMNAND_LL_OP
] = 0x178,
260 [BRCMNAND_CS0_BASE
] = 0x40,
261 [BRCMNAND_CS1_BASE
] = 0xd0,
262 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
263 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
264 [BRCMNAND_UNCORR_COUNT
] = 0,
265 [BRCMNAND_CORR_COUNT
] = 0,
266 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
267 [BRCMNAND_CORR_ADDR
] = 0x74,
268 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
269 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
270 [BRCMNAND_SEMAPHORE
] = 0x58,
271 [BRCMNAND_ID
] = 0x60,
272 [BRCMNAND_ID_EXT
] = 0x64,
273 [BRCMNAND_LL_RDATA
] = 0x17c,
274 [BRCMNAND_OOB_READ_BASE
] = 0x20,
275 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
276 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
277 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
278 [BRCMNAND_FC_BASE
] = 0x200,
281 /* BRCMNAND v6.0 - v7.1 */
282 static const u16 brcmnand_regs_v60
[] = {
283 [BRCMNAND_CMD_START
] = 0x04,
284 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
285 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
286 [BRCMNAND_INTFC_STATUS
] = 0x14,
287 [BRCMNAND_CS_SELECT
] = 0x18,
288 [BRCMNAND_CS_XOR
] = 0x1c,
289 [BRCMNAND_LL_OP
] = 0x20,
290 [BRCMNAND_CS0_BASE
] = 0x50,
291 [BRCMNAND_CS1_BASE
] = 0,
292 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
293 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
294 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
295 [BRCMNAND_CORR_COUNT
] = 0x100,
296 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
297 [BRCMNAND_CORR_ADDR
] = 0x110,
298 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
299 [BRCMNAND_UNCORR_ADDR
] = 0x118,
300 [BRCMNAND_SEMAPHORE
] = 0x150,
301 [BRCMNAND_ID
] = 0x194,
302 [BRCMNAND_ID_EXT
] = 0x198,
303 [BRCMNAND_LL_RDATA
] = 0x19c,
304 [BRCMNAND_OOB_READ_BASE
] = 0x200,
305 [BRCMNAND_OOB_READ_10_BASE
] = 0,
306 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
307 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
308 [BRCMNAND_FC_BASE
] = 0x400,
311 enum brcmnand_cs_reg
{
312 BRCMNAND_CS_CFG_EXT
= 0,
314 BRCMNAND_CS_ACC_CONTROL
,
319 /* Per chip-select offsets for v7.1 */
320 static const u8 brcmnand_cs_offsets_v71
[] = {
321 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
322 [BRCMNAND_CS_CFG_EXT
] = 0x04,
323 [BRCMNAND_CS_CFG
] = 0x08,
324 [BRCMNAND_CS_TIMING1
] = 0x0c,
325 [BRCMNAND_CS_TIMING2
] = 0x10,
328 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
329 static const u8 brcmnand_cs_offsets
[] = {
330 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
331 [BRCMNAND_CS_CFG_EXT
] = 0x04,
332 [BRCMNAND_CS_CFG
] = 0x04,
333 [BRCMNAND_CS_TIMING1
] = 0x08,
334 [BRCMNAND_CS_TIMING2
] = 0x0c,
337 /* Per chip-select offset for <= v5.0 on CS0 only */
338 static const u8 brcmnand_cs_offsets_cs0
[] = {
339 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
340 [BRCMNAND_CS_CFG_EXT
] = 0x08,
341 [BRCMNAND_CS_CFG
] = 0x08,
342 [BRCMNAND_CS_TIMING1
] = 0x10,
343 [BRCMNAND_CS_TIMING2
] = 0x14,
347 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
348 * one config register, but once the bitfields overflowed, newer controllers
349 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
352 CFG_BLK_ADR_BYTES_SHIFT
= 8,
353 CFG_COL_ADR_BYTES_SHIFT
= 12,
354 CFG_FUL_ADR_BYTES_SHIFT
= 16,
355 CFG_BUS_WIDTH_SHIFT
= 23,
356 CFG_BUS_WIDTH
= BIT(CFG_BUS_WIDTH_SHIFT
),
357 CFG_DEVICE_SIZE_SHIFT
= 24,
359 /* Only for pre-v7.1 (with no CFG_EXT register) */
360 CFG_PAGE_SIZE_SHIFT
= 20,
361 CFG_BLK_SIZE_SHIFT
= 28,
363 /* Only for v7.1+ (with CFG_EXT register) */
364 CFG_EXT_PAGE_SIZE_SHIFT
= 0,
365 CFG_EXT_BLK_SIZE_SHIFT
= 4,
368 /* BRCMNAND_INTFC_STATUS */
370 INTFC_FLASH_STATUS
= GENMASK(7, 0),
372 INTFC_ERASED
= BIT(27),
373 INTFC_OOB_VALID
= BIT(28),
374 INTFC_CACHE_VALID
= BIT(29),
375 INTFC_FLASH_READY
= BIT(30),
376 INTFC_CTLR_READY
= BIT(31),
379 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
381 return brcmnand_readl(ctrl
->nand_base
+ offs
);
384 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
387 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
390 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
392 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
393 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
394 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
396 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
398 /* Only support v4.0+? */
399 if (ctrl
->nand_version
< 0x0400) {
400 dev_err(ctrl
->dev
, "version %#x not supported\n",
405 /* Register offsets */
406 if (ctrl
->nand_version
>= 0x0600)
407 ctrl
->reg_offsets
= brcmnand_regs_v60
;
408 else if (ctrl
->nand_version
>= 0x0500)
409 ctrl
->reg_offsets
= brcmnand_regs_v50
;
410 else if (ctrl
->nand_version
>= 0x0400)
411 ctrl
->reg_offsets
= brcmnand_regs_v40
;
413 /* Chip-select stride */
414 if (ctrl
->nand_version
>= 0x0701)
415 ctrl
->reg_spacing
= 0x14;
417 ctrl
->reg_spacing
= 0x10;
419 /* Per chip-select registers */
420 if (ctrl
->nand_version
>= 0x0701) {
421 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
423 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
425 /* v5.0 and earlier has a different CS0 offset layout */
426 if (ctrl
->nand_version
<= 0x0500)
427 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
430 /* Page / block sizes */
431 if (ctrl
->nand_version
>= 0x0701) {
432 /* >= v7.1 use nice power-of-2 values! */
433 ctrl
->max_page_size
= 16 * 1024;
434 ctrl
->max_block_size
= 2 * 1024 * 1024;
436 ctrl
->page_sizes
= page_sizes
;
437 if (ctrl
->nand_version
>= 0x0600)
438 ctrl
->block_sizes
= block_sizes_v6
;
440 ctrl
->block_sizes
= block_sizes_v4
;
442 if (ctrl
->nand_version
< 0x0400) {
443 ctrl
->max_page_size
= 4096;
444 ctrl
->max_block_size
= 512 * 1024;
448 /* Maximum spare area sector size (per 512B) */
449 if (ctrl
->nand_version
>= 0x0600)
451 else if (ctrl
->nand_version
>= 0x0500)
456 /* v6.0 and newer (except v6.1) have prefetch support */
457 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
458 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
461 * v6.x has cache mode, but it's implemented differently. Ignore it for
464 if (ctrl
->nand_version
>= 0x0700)
465 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
467 if (ctrl
->nand_version
>= 0x0500)
468 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
470 if (ctrl
->nand_version
>= 0x0700)
471 ctrl
->features
|= BRCMNAND_HAS_WP
;
472 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
473 ctrl
->features
|= BRCMNAND_HAS_WP
;
478 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
479 enum brcmnand_reg reg
)
481 u16 offs
= ctrl
->reg_offsets
[reg
];
484 return nand_readreg(ctrl
, offs
);
489 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
490 enum brcmnand_reg reg
, u32 val
)
492 u16 offs
= ctrl
->reg_offsets
[reg
];
495 nand_writereg(ctrl
, offs
, val
);
498 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
499 enum brcmnand_reg reg
, u32 mask
, unsigned
502 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
506 brcmnand_write_reg(ctrl
, reg
, tmp
);
509 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
511 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
514 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
517 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
520 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
521 enum brcmnand_cs_reg reg
)
523 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
524 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
527 if (cs
== 0 && ctrl
->cs0_offsets
)
528 cs_offs
= ctrl
->cs0_offsets
[reg
];
530 cs_offs
= ctrl
->cs_offsets
[reg
];
533 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
535 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
538 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
540 if (ctrl
->nand_version
< 0x0600)
542 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
545 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
547 struct brcmnand_controller
*ctrl
= host
->ctrl
;
548 unsigned int shift
= 0, bits
;
549 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
552 if (ctrl
->nand_version
>= 0x0600)
554 else if (ctrl
->nand_version
>= 0x0500)
559 if (ctrl
->nand_version
>= 0x0600) {
561 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
562 shift
= (cs
% 5) * bits
;
564 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
567 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
569 if (ctrl
->nand_version
< 0x0700)
574 /***********************************************************************
575 * NAND ACC CONTROL bitfield
577 * Some bits have remained constant throughout hardware revision, while
578 * others have shifted around.
579 ***********************************************************************/
581 /* Constant for all versions (where supported) */
583 /* See BRCMNAND_HAS_CACHE_MODE */
584 ACC_CONTROL_CACHE_MODE
= BIT(22),
586 /* See BRCMNAND_HAS_PREFETCH */
587 ACC_CONTROL_PREFETCH
= BIT(23),
589 ACC_CONTROL_PAGE_HIT
= BIT(24),
590 ACC_CONTROL_WR_PREEMPT
= BIT(25),
591 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
592 ACC_CONTROL_RD_ERASED
= BIT(27),
593 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
594 ACC_CONTROL_WR_ECC
= BIT(30),
595 ACC_CONTROL_RD_ECC
= BIT(31),
598 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
600 if (ctrl
->nand_version
>= 0x0600)
601 return GENMASK(6, 0);
603 return GENMASK(5, 0);
606 #define NAND_ACC_CONTROL_ECC_SHIFT 16
608 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
610 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
612 return mask
<< NAND_ACC_CONTROL_ECC_SHIFT
;
615 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
617 struct brcmnand_controller
*ctrl
= host
->ctrl
;
618 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
619 u32 acc_control
= nand_readreg(ctrl
, offs
);
620 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
623 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
624 acc_control
|= host
->hwcfg
.ecc_level
625 << NAND_ACC_CONTROL_ECC_SHIFT
;
627 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
628 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
631 nand_writereg(ctrl
, offs
, acc_control
);
634 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
636 if (ctrl
->nand_version
>= 0x0600)
638 else if (ctrl
->nand_version
>= 0x0500)
644 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
646 struct brcmnand_controller
*ctrl
= host
->ctrl
;
647 int shift
= brcmnand_sector_1k_shift(ctrl
);
648 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
649 BRCMNAND_CS_ACC_CONTROL
);
654 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
657 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
659 struct brcmnand_controller
*ctrl
= host
->ctrl
;
660 int shift
= brcmnand_sector_1k_shift(ctrl
);
661 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
662 BRCMNAND_CS_ACC_CONTROL
);
668 tmp
= nand_readreg(ctrl
, acc_control_offs
);
669 tmp
&= ~(1 << shift
);
670 tmp
|= (!!val
) << shift
;
671 nand_writereg(ctrl
, acc_control_offs
, tmp
);
674 /***********************************************************************
676 ***********************************************************************/
679 CS_SELECT_NAND_WP
= BIT(29),
680 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
683 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
685 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
687 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
690 /***********************************************************************
692 ***********************************************************************/
695 FLASH_DMA_REVISION
= 0x00,
696 FLASH_DMA_FIRST_DESC
= 0x04,
697 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
698 FLASH_DMA_CTRL
= 0x0c,
699 FLASH_DMA_MODE
= 0x10,
700 FLASH_DMA_STATUS
= 0x14,
701 FLASH_DMA_INTERRUPT_DESC
= 0x18,
702 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
703 FLASH_DMA_ERROR_STATUS
= 0x20,
704 FLASH_DMA_CURRENT_DESC
= 0x24,
705 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
708 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
710 return ctrl
->flash_dma_base
;
/*
 * A buffer is DMA-able only if it is non-NULL, not vmalloc'd (vmalloc
 * memory is not physically contiguous), and 32-bit aligned.
 */
static inline bool flash_dma_buf_ok(const void *buf)
{
	if (!buf || is_vmalloc_addr(buf))
		return false;

	return likely(IS_ALIGNED((uintptr_t)buf, 4));
}
719 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
722 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
725 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
727 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
730 /* Low-level operation types: command, address, write, or read */
731 enum brcmnand_llop_type
{
738 /***********************************************************************
739 * Internal support functions
740 ***********************************************************************/
742 static inline bool is_hamming_ecc(struct brcmnand_cfg
*cfg
)
744 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
745 cfg
->ecc_level
== 15;
749 * Returns a nand_ecclayout structure for the given layout/configuration.
750 * Returns NULL on failure.
752 static struct nand_ecclayout
*brcmnand_create_layout(int ecc_level
,
753 struct brcmnand_host
*host
)
755 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
757 struct nand_ecclayout
*layout
;
763 layout
= devm_kzalloc(&host
->pdev
->dev
, sizeof(*layout
), GFP_KERNEL
);
767 sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
768 sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
771 if (is_hamming_ecc(cfg
)) {
772 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
773 /* First sector of each page may have BBI */
775 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
776 /* Small-page NAND use byte 6 for BBI */
777 if (cfg
->page_size
== 512)
778 layout
->oobfree
[idx2
].offset
--;
779 layout
->oobfree
[idx2
].length
= 5;
781 layout
->oobfree
[idx2
].offset
= i
* sas
;
782 layout
->oobfree
[idx2
].length
= 6;
785 layout
->eccpos
[idx1
++] = i
* sas
+ 6;
786 layout
->eccpos
[idx1
++] = i
* sas
+ 7;
787 layout
->eccpos
[idx1
++] = i
* sas
+ 8;
788 layout
->oobfree
[idx2
].offset
= i
* sas
+ 9;
789 layout
->oobfree
[idx2
].length
= 7;
791 /* Leave zero-terminated entry for OOBFREE */
792 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
793 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
800 * CONTROLLER_VERSION:
801 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
802 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
803 * But we will just be conservative.
805 req
= DIV_ROUND_UP(ecc_level
* 14, 8);
807 dev_err(&host
->pdev
->dev
,
808 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
813 layout
->eccbytes
= req
* sectors
;
814 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
815 for (j
= sas
- req
; j
< sas
&& idx1
<
816 MTD_MAX_ECCPOS_ENTRIES_LARGE
; j
++, idx1
++)
817 layout
->eccpos
[idx1
] = i
* sas
+ j
;
819 /* First sector of each page may have BBI */
821 if (cfg
->page_size
== 512 && (sas
- req
>= 6)) {
822 /* Small-page NAND use byte 6 for BBI */
823 layout
->oobfree
[idx2
].offset
= 0;
824 layout
->oobfree
[idx2
].length
= 5;
827 layout
->oobfree
[idx2
].offset
= 6;
828 layout
->oobfree
[idx2
].length
=
832 } else if (sas
> req
+ 1) {
833 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
834 layout
->oobfree
[idx2
].length
= sas
- req
- 1;
837 } else if (sas
> req
) {
838 layout
->oobfree
[idx2
].offset
= i
* sas
;
839 layout
->oobfree
[idx2
].length
= sas
- req
;
842 /* Leave zero-terminated entry for OOBFREE */
843 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
844 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
848 /* Sum available OOB */
849 for (i
= 0; i
< MTD_MAX_OOBFREE_ENTRIES_LARGE
; i
++)
850 layout
->oobavail
+= layout
->oobfree
[i
].length
;
854 static struct nand_ecclayout
*brcmstb_choose_ecc_layout(
855 struct brcmnand_host
*host
)
857 struct nand_ecclayout
*layout
;
858 struct brcmnand_cfg
*p
= &host
->hwcfg
;
859 unsigned int ecc_level
= p
->ecc_level
;
861 if (p
->sector_size_1k
)
864 layout
= brcmnand_create_layout(ecc_level
, host
);
866 dev_err(&host
->pdev
->dev
,
867 "no proper ecc_layout for this NAND cfg\n");
874 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
876 struct nand_chip
*chip
= mtd
->priv
;
877 struct brcmnand_host
*host
= chip
->priv
;
878 struct brcmnand_controller
*ctrl
= host
->ctrl
;
880 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
881 static int old_wp
= -1;
884 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
887 brcmnand_set_wp(ctrl
, wp
);
891 /* Helper functions for reading and writing OOB registers */
892 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
894 u16 offset0
, offset10
, reg_offs
;
896 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
897 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
899 if (offs
>= ctrl
->max_oob
)
902 if (offs
>= 16 && offset10
)
903 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
905 reg_offs
= offset0
+ (offs
& ~0x03);
907 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
910 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
913 u16 offset0
, offset10
, reg_offs
;
915 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
916 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
918 if (offs
>= ctrl
->max_oob
)
921 if (offs
>= 16 && offset10
)
922 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
924 reg_offs
= offset0
+ (offs
& ~0x03);
926 nand_writereg(ctrl
, reg_offs
, data
);
930 * read_oob_from_regs - read data from OOB registers
931 * @ctrl: NAND controller
932 * @i: sub-page sector index
933 * @oob: buffer to read to
934 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
935 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
937 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
938 int sas
, int sector_1k
)
940 int tbytes
= sas
<< sector_1k
;
943 /* Adjust OOB values for 1K sector size */
944 if (sector_1k
&& (i
& 0x01))
945 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
946 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
948 for (j
= 0; j
< tbytes
; j
++)
949 oob
[j
] = oob_reg_read(ctrl
, j
);
954 * write_oob_to_regs - write data to OOB registers
955 * @i: sub-page sector index
956 * @oob: buffer to write from
957 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
958 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
960 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
961 const u8
*oob
, int sas
, int sector_1k
)
963 int tbytes
= sas
<< sector_1k
;
966 /* Adjust OOB values for 1K sector size */
967 if (sector_1k
&& (i
& 0x01))
968 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
969 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
971 for (j
= 0; j
< tbytes
; j
+= 4)
972 oob_reg_write(ctrl
, j
,
980 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
982 struct brcmnand_controller
*ctrl
= data
;
984 /* Discard all NAND_CTLRDY interrupts during DMA */
985 if (ctrl
->dma_pending
)
988 complete(&ctrl
->done
);
992 /* Handle SoC-specific interrupt hardware */
993 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
995 struct brcmnand_controller
*ctrl
= data
;
997 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
998 return brcmnand_ctlrdy_irq(irq
, data
);
1003 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
1005 struct brcmnand_controller
*ctrl
= data
;
1007 complete(&ctrl
->dma_done
);
1012 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
1014 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1017 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
1018 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
1019 BUG_ON(ctrl
->cmd_pending
!= 0);
1020 ctrl
->cmd_pending
= cmd
;
1022 intfc
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
1023 BUG_ON(!(intfc
& INTFC_CTLR_READY
));
1025 mb(); /* flush previous writes */
1026 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1027 cmd
<< brcmnand_cmd_shift(ctrl
));
1030 /***********************************************************************
1031 * NAND MTD API: read/program/erase
1032 ***********************************************************************/
/*
 * nand_chip cmd_ctrl hook — unused; all commands go through the
 * controller's native command interface instead.
 * NOTE(review): second/third parameter names restored from a dropped
 * source line; confirm against the original driver.
 */
static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}
1040 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1042 struct nand_chip
*chip
= mtd
->priv
;
1043 struct brcmnand_host
*host
= chip
->priv
;
1044 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1045 unsigned long timeo
= msecs_to_jiffies(100);
1047 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1048 if (ctrl
->cmd_pending
&&
1049 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1050 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1051 >> brcmnand_cmd_shift(ctrl
);
1053 dev_err_ratelimited(ctrl
->dev
,
1054 "timeout waiting for command %#02x\n", cmd
);
1055 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1056 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1058 ctrl
->cmd_pending
= 0;
1059 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1068 LLOP_RETURN_IDLE
= BIT(31),
1070 LLOP_DATA_MASK
= GENMASK(15, 0),
1073 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1074 enum brcmnand_llop_type type
, u32 data
,
1077 struct mtd_info
*mtd
= &host
->mtd
;
1078 struct nand_chip
*chip
= &host
->chip
;
1079 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1082 tmp
= data
& LLOP_DATA_MASK
;
1085 tmp
|= LLOP_WE
| LLOP_CLE
;
1089 tmp
|= LLOP_WE
| LLOP_ALE
;
1102 tmp
|= LLOP_RETURN_IDLE
;
1104 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1106 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1107 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1109 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1110 return brcmnand_waitfunc(mtd
, chip
);
1113 static void brcmnand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1114 int column
, int page_addr
)
1116 struct nand_chip
*chip
= mtd
->priv
;
1117 struct brcmnand_host
*host
= chip
->priv
;
1118 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1119 u64 addr
= (u64
)page_addr
<< chip
->page_shift
;
1122 if (command
== NAND_CMD_READID
|| command
== NAND_CMD_PARAM
||
1123 command
== NAND_CMD_RNDOUT
)
1125 /* Avoid propagating a negative, don't-care address */
1126 else if (page_addr
< 0)
1129 dev_dbg(ctrl
->dev
, "cmd 0x%x addr 0x%llx\n", command
,
1130 (unsigned long long)addr
);
1132 host
->last_cmd
= command
;
1133 host
->last_byte
= 0;
1134 host
->last_addr
= addr
;
1137 case NAND_CMD_RESET
:
1138 native_cmd
= CMD_FLASH_RESET
;
1140 case NAND_CMD_STATUS
:
1141 native_cmd
= CMD_STATUS_READ
;
1143 case NAND_CMD_READID
:
1144 native_cmd
= CMD_DEVICE_ID_READ
;
1146 case NAND_CMD_READOOB
:
1147 native_cmd
= CMD_SPARE_AREA_READ
;
1149 case NAND_CMD_ERASE1
:
1150 native_cmd
= CMD_BLOCK_ERASE
;
1151 brcmnand_wp(mtd
, 0);
1153 case NAND_CMD_PARAM
:
1154 native_cmd
= CMD_PARAMETER_READ
;
1156 case NAND_CMD_SET_FEATURES
:
1157 case NAND_CMD_GET_FEATURES
:
1158 brcmnand_low_level_op(host
, LL_OP_CMD
, command
, false);
1159 brcmnand_low_level_op(host
, LL_OP_ADDR
, column
, false);
1161 case NAND_CMD_RNDOUT
:
1162 native_cmd
= CMD_PARAMETER_CHANGE_COL
;
1163 addr
&= ~((u64
)(FC_BYTES
- 1));
1165 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1166 * NB: hwcfg.sector_size_1k may not be initialized yet
1168 if (brcmnand_get_sector_size_1k(host
)) {
1169 host
->hwcfg
.sector_size_1k
=
1170 brcmnand_get_sector_size_1k(host
);
1171 brcmnand_set_sector_size_1k(host
, 0);
1179 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1180 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1181 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1182 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
, lower_32_bits(addr
));
1183 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1185 brcmnand_send_cmd(host
, native_cmd
);
1186 brcmnand_waitfunc(mtd
, chip
);
1188 if (native_cmd
== CMD_PARAMETER_READ
||
1189 native_cmd
== CMD_PARAMETER_CHANGE_COL
) {
1190 /* Copy flash cache word-wise */
1191 u32
*flash_cache
= (u32
*)ctrl
->flash_cache
;
1194 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1197 * Must cache the FLASH_CACHE now, since changes in
1198 * SECTOR_SIZE_1K may invalidate it
1200 for (i
= 0; i
< FC_WORDS
; i
++)
1202 * Flash cache is big endian for parameter pages, at
1205 flash_cache
[i
] = be32_to_cpu(brcmnand_read_fc(ctrl
, i
));
1207 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1209 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1210 if (host
->hwcfg
.sector_size_1k
)
1211 brcmnand_set_sector_size_1k(host
,
1212 host
->hwcfg
.sector_size_1k
);
1215 /* Re-enabling protection is necessary only after erase */
1216 if (command
== NAND_CMD_ERASE1
)
1217 brcmnand_wp(mtd
, 1);
1220 static uint8_t brcmnand_read_byte(struct mtd_info
*mtd
)
1222 struct nand_chip
*chip
= mtd
->priv
;
1223 struct brcmnand_host
*host
= chip
->priv
;
1224 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1228 switch (host
->last_cmd
) {
1229 case NAND_CMD_READID
:
1230 if (host
->last_byte
< 4)
1231 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID
) >>
1232 (24 - (host
->last_byte
<< 3));
1233 else if (host
->last_byte
< 8)
1234 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID_EXT
) >>
1235 (56 - (host
->last_byte
<< 3));
1238 case NAND_CMD_READOOB
:
1239 ret
= oob_reg_read(ctrl
, host
->last_byte
);
1242 case NAND_CMD_STATUS
:
1243 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1245 if (wp_on
) /* hide WP status */
1246 ret
|= NAND_STATUS_WP
;
1249 case NAND_CMD_PARAM
:
1250 case NAND_CMD_RNDOUT
:
1251 addr
= host
->last_addr
+ host
->last_byte
;
1252 offs
= addr
& (FC_BYTES
- 1);
1254 /* At FC_BYTES boundary, switch to next column */
1255 if (host
->last_byte
> 0 && offs
== 0)
1256 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, addr
, -1);
1258 ret
= ctrl
->flash_cache
[offs
];
1260 case NAND_CMD_GET_FEATURES
:
1261 if (host
->last_byte
>= ONFI_SUBFEATURE_PARAM_LEN
) {
1264 bool last
= host
->last_byte
==
1265 ONFI_SUBFEATURE_PARAM_LEN
- 1;
1266 brcmnand_low_level_op(host
, LL_OP_RD
, 0, last
);
1267 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_LL_RDATA
) & 0xff;
1271 dev_dbg(ctrl
->dev
, "read byte = 0x%02x\n", ret
);
1277 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1281 for (i
= 0; i
< len
; i
++, buf
++)
1282 *buf
= brcmnand_read_byte(mtd
);
1285 static void brcmnand_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
,
1289 struct nand_chip
*chip
= mtd
->priv
;
1290 struct brcmnand_host
*host
= chip
->priv
;
1292 switch (host
->last_cmd
) {
1293 case NAND_CMD_SET_FEATURES
:
1294 for (i
= 0; i
< len
; i
++)
1295 brcmnand_low_level_op(host
, LL_OP_WR
, buf
[i
],
1305 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1306 * following ahead of time:
1307 * - Is this descriptor the beginning or end of a linked list?
1308 * - What is the (DMA) address of the next descriptor in the linked list?
1310 static int brcmnand_fill_dma_desc(struct brcmnand_host
*host
,
1311 struct brcm_nand_dma_desc
*desc
, u64 addr
,
1312 dma_addr_t buf
, u32 len
, u8 dma_cmd
,
1313 bool begin
, bool end
,
1314 dma_addr_t next_desc
)
1316 memset(desc
, 0, sizeof(*desc
));
1317 /* Descriptors are written in native byte order (wordwise) */
1318 desc
->next_desc
= lower_32_bits(next_desc
);
1319 desc
->next_desc_ext
= upper_32_bits(next_desc
);
1320 desc
->cmd_irq
= (dma_cmd
<< 24) |
1321 (end
? (0x03 << 8) : 0) | /* IRQ | STOP */
1322 (!!begin
) | ((!!end
) << 1); /* head, tail */
1323 #ifdef CONFIG_CPU_BIG_ENDIAN
1324 desc
->cmd_irq
|= 0x01 << 12;
1326 desc
->dram_addr
= lower_32_bits(buf
);
1327 desc
->dram_addr_ext
= upper_32_bits(buf
);
1328 desc
->tfr_len
= len
;
1329 desc
->total_len
= len
;
1330 desc
->flash_addr
= lower_32_bits(addr
);
1331 desc
->flash_addr_ext
= upper_32_bits(addr
);
1332 desc
->cs
= host
->cs
;
1333 desc
->status_valid
= 0x01;
1338 * Kick the FLASH_DMA engine, with a given DMA descriptor
1340 static void brcmnand_dma_run(struct brcmnand_host
*host
, dma_addr_t desc
)
1342 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1343 unsigned long timeo
= msecs_to_jiffies(100);
1345 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC
, lower_32_bits(desc
));
1346 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC
);
1347 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC_EXT
, upper_32_bits(desc
));
1348 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC_EXT
);
1350 /* Start FLASH_DMA engine */
1351 ctrl
->dma_pending
= true;
1352 mb(); /* flush previous writes */
1353 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0x03); /* wake | run */
1355 if (wait_for_completion_timeout(&ctrl
->dma_done
, timeo
) <= 0) {
1357 "timeout waiting for DMA; status %#x, error status %#x\n",
1358 flash_dma_readl(ctrl
, FLASH_DMA_STATUS
),
1359 flash_dma_readl(ctrl
, FLASH_DMA_ERROR_STATUS
));
1361 ctrl
->dma_pending
= false;
1362 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0); /* force stop */
1365 static int brcmnand_dma_trans(struct brcmnand_host
*host
, u64 addr
, u32
*buf
,
1366 u32 len
, u8 dma_cmd
)
1368 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1370 int dir
= dma_cmd
== CMD_PAGE_READ
? DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
1372 buf_pa
= dma_map_single(ctrl
->dev
, buf
, len
, dir
);
1373 if (dma_mapping_error(ctrl
->dev
, buf_pa
)) {
1374 dev_err(ctrl
->dev
, "unable to map buffer for DMA\n");
1378 brcmnand_fill_dma_desc(host
, ctrl
->dma_desc
, addr
, buf_pa
, len
,
1379 dma_cmd
, true, true, 0);
1381 brcmnand_dma_run(host
, ctrl
->dma_pa
);
1383 dma_unmap_single(ctrl
->dev
, buf_pa
, len
, dir
);
1385 if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_ECC_ERROR
)
1387 else if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_CORR_ERROR
)
1394 * Assumes proper CS is already set
1396 static int brcmnand_read_by_pio(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1397 u64 addr
, unsigned int trans
, u32
*buf
,
1398 u8
*oob
, u64
*err_addr
)
1400 struct brcmnand_host
*host
= chip
->priv
;
1401 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1404 /* Clear error addresses */
1405 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_ADDR
, 0);
1406 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_ADDR
, 0);
1407 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_EXT_ADDR
, 0);
1408 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_EXT_ADDR
, 0);
1410 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1411 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1412 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1414 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1415 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1416 lower_32_bits(addr
));
1417 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1418 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1419 brcmnand_send_cmd(host
, CMD_PAGE_READ
);
1420 brcmnand_waitfunc(mtd
, chip
);
1423 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1425 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1426 *buf
= brcmnand_read_fc(ctrl
, j
);
1428 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1432 oob
+= read_oob_from_regs(ctrl
, i
, oob
,
1433 mtd
->oobsize
/ trans
,
1434 host
->hwcfg
.sector_size_1k
);
1437 *err_addr
= brcmnand_read_reg(ctrl
,
1438 BRCMNAND_UNCORR_ADDR
) |
1439 ((u64
)(brcmnand_read_reg(ctrl
,
1440 BRCMNAND_UNCORR_EXT_ADDR
)
1447 *err_addr
= brcmnand_read_reg(ctrl
,
1448 BRCMNAND_CORR_ADDR
) |
1449 ((u64
)(brcmnand_read_reg(ctrl
,
1450 BRCMNAND_CORR_EXT_ADDR
)
1460 static int brcmnand_read(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1461 u64 addr
, unsigned int trans
, u32
*buf
, u8
*oob
)
1463 struct brcmnand_host
*host
= chip
->priv
;
1464 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1468 dev_dbg(ctrl
->dev
, "read %llx -> %p\n", (unsigned long long)addr
, buf
);
1470 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_COUNT
, 0);
1472 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1473 err
= brcmnand_dma_trans(host
, addr
, buf
, trans
* FC_BYTES
,
1476 if (mtd_is_bitflip_or_eccerr(err
))
1483 memset(oob
, 0x99, mtd
->oobsize
);
1485 err
= brcmnand_read_by_pio(mtd
, chip
, addr
, trans
, buf
,
1489 if (mtd_is_eccerr(err
)) {
1490 dev_dbg(ctrl
->dev
, "uncorrectable error at 0x%llx\n",
1491 (unsigned long long)err_addr
);
1492 mtd
->ecc_stats
.failed
++;
1493 /* NAND layer expects zero on ECC errors */
1497 if (mtd_is_bitflip(err
)) {
1498 unsigned int corrected
= brcmnand_count_corrected(ctrl
);
1500 dev_dbg(ctrl
->dev
, "corrected error at 0x%llx\n",
1501 (unsigned long long)err_addr
);
1502 mtd
->ecc_stats
.corrected
+= corrected
;
1503 /* Always exceed the software-imposed threshold */
1504 return max(mtd
->bitflip_threshold
, corrected
);
1510 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1511 uint8_t *buf
, int oob_required
, int page
)
1513 struct brcmnand_host
*host
= chip
->priv
;
1514 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1516 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1517 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1520 static int brcmnand_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1521 uint8_t *buf
, int oob_required
, int page
)
1523 struct brcmnand_host
*host
= chip
->priv
;
1524 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1527 brcmnand_set_ecc_enabled(host
, 0);
1528 ret
= brcmnand_read(mtd
, chip
, host
->last_addr
,
1529 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1530 brcmnand_set_ecc_enabled(host
, 1);
1534 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1537 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1538 mtd
->writesize
>> FC_SHIFT
,
1539 NULL
, (u8
*)chip
->oob_poi
);
1542 static int brcmnand_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1545 struct brcmnand_host
*host
= chip
->priv
;
1547 brcmnand_set_ecc_enabled(host
, 0);
1548 brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1549 mtd
->writesize
>> FC_SHIFT
,
1550 NULL
, (u8
*)chip
->oob_poi
);
1551 brcmnand_set_ecc_enabled(host
, 1);
1555 static int brcmnand_write(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1556 u64 addr
, const u32
*buf
, u8
*oob
)
1558 struct brcmnand_host
*host
= chip
->priv
;
1559 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1560 unsigned int i
, j
, trans
= mtd
->writesize
>> FC_SHIFT
;
1561 int status
, ret
= 0;
1563 dev_dbg(ctrl
->dev
, "write %llx <- %p\n", (unsigned long long)addr
, buf
);
1565 if (unlikely((unsigned long)buf
& 0x03)) {
1566 dev_warn(ctrl
->dev
, "unaligned buffer: %p\n", buf
);
1567 buf
= (u32
*)((unsigned long)buf
& ~0x03);
1570 brcmnand_wp(mtd
, 0);
1572 for (i
= 0; i
< ctrl
->max_oob
; i
+= 4)
1573 oob_reg_write(ctrl
, i
, 0xffffffff);
1575 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1576 if (brcmnand_dma_trans(host
, addr
, (u32
*)buf
,
1577 mtd
->writesize
, CMD_PROGRAM_PAGE
))
1582 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1583 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1584 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1586 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1587 /* full address MUST be set before populating FC */
1588 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1589 lower_32_bits(addr
));
1590 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1593 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1595 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1596 brcmnand_write_fc(ctrl
, j
, *buf
);
1598 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1600 for (j
= 0; j
< FC_WORDS
; j
++)
1601 brcmnand_write_fc(ctrl
, j
, 0xffffffff);
1605 oob
+= write_oob_to_regs(ctrl
, i
, oob
,
1606 mtd
->oobsize
/ trans
,
1607 host
->hwcfg
.sector_size_1k
);
1610 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1611 brcmnand_send_cmd(host
, CMD_PROGRAM_PAGE
);
1612 status
= brcmnand_waitfunc(mtd
, chip
);
1614 if (status
& NAND_STATUS_FAIL
) {
1615 dev_info(ctrl
->dev
, "program failed at %llx\n",
1616 (unsigned long long)addr
);
1622 brcmnand_wp(mtd
, 1);
1626 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1627 const uint8_t *buf
, int oob_required
, int page
)
1629 struct brcmnand_host
*host
= chip
->priv
;
1630 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1632 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1636 static int brcmnand_write_page_raw(struct mtd_info
*mtd
,
1637 struct nand_chip
*chip
, const uint8_t *buf
,
1638 int oob_required
, int page
)
1640 struct brcmnand_host
*host
= chip
->priv
;
1641 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1643 brcmnand_set_ecc_enabled(host
, 0);
1644 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1645 brcmnand_set_ecc_enabled(host
, 1);
1649 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1652 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1653 NULL
, chip
->oob_poi
);
1656 static int brcmnand_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1659 struct brcmnand_host
*host
= chip
->priv
;
1662 brcmnand_set_ecc_enabled(host
, 0);
1663 ret
= brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
, NULL
,
1664 (u8
*)chip
->oob_poi
);
1665 brcmnand_set_ecc_enabled(host
, 1);
1670 /***********************************************************************
1671 * Per-CS setup (1 NAND device)
1672 ***********************************************************************/
1674 static int brcmnand_set_cfg(struct brcmnand_host
*host
,
1675 struct brcmnand_cfg
*cfg
)
1677 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1678 struct nand_chip
*chip
= &host
->chip
;
1679 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1680 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1681 BRCMNAND_CS_CFG_EXT
);
1682 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1683 BRCMNAND_CS_ACC_CONTROL
);
1684 u8 block_size
= 0, page_size
= 0, device_size
= 0;
1687 if (ctrl
->block_sizes
) {
1690 for (i
= 0, found
= 0; ctrl
->block_sizes
[i
]; i
++)
1691 if (ctrl
->block_sizes
[i
] * 1024 == cfg
->block_size
) {
1696 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1701 block_size
= ffs(cfg
->block_size
) - ffs(BRCMNAND_MIN_BLOCKSIZE
);
1704 if (cfg
->block_size
< BRCMNAND_MIN_BLOCKSIZE
|| (ctrl
->max_block_size
&&
1705 cfg
->block_size
> ctrl
->max_block_size
)) {
1706 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1711 if (ctrl
->page_sizes
) {
1714 for (i
= 0, found
= 0; ctrl
->page_sizes
[i
]; i
++)
1715 if (ctrl
->page_sizes
[i
] == cfg
->page_size
) {
1720 dev_warn(ctrl
->dev
, "invalid page size %u\n",
1725 page_size
= ffs(cfg
->page_size
) - ffs(BRCMNAND_MIN_PAGESIZE
);
1728 if (cfg
->page_size
< BRCMNAND_MIN_PAGESIZE
|| (ctrl
->max_page_size
&&
1729 cfg
->page_size
> ctrl
->max_page_size
)) {
1730 dev_warn(ctrl
->dev
, "invalid page size %u\n", cfg
->page_size
);
1734 if (fls64(cfg
->device_size
) < fls64(BRCMNAND_MIN_DEVSIZE
)) {
1735 dev_warn(ctrl
->dev
, "invalid device size 0x%llx\n",
1736 (unsigned long long)cfg
->device_size
);
1739 device_size
= fls64(cfg
->device_size
) - fls64(BRCMNAND_MIN_DEVSIZE
);
1741 tmp
= (cfg
->blk_adr_bytes
<< CFG_BLK_ADR_BYTES_SHIFT
) |
1742 (cfg
->col_adr_bytes
<< CFG_COL_ADR_BYTES_SHIFT
) |
1743 (cfg
->ful_adr_bytes
<< CFG_FUL_ADR_BYTES_SHIFT
) |
1744 (!!(cfg
->device_width
== 16) << CFG_BUS_WIDTH_SHIFT
) |
1745 (device_size
<< CFG_DEVICE_SIZE_SHIFT
);
1746 if (cfg_offs
== cfg_ext_offs
) {
1747 tmp
|= (page_size
<< CFG_PAGE_SIZE_SHIFT
) |
1748 (block_size
<< CFG_BLK_SIZE_SHIFT
);
1749 nand_writereg(ctrl
, cfg_offs
, tmp
);
1751 nand_writereg(ctrl
, cfg_offs
, tmp
);
1752 tmp
= (page_size
<< CFG_EXT_PAGE_SIZE_SHIFT
) |
1753 (block_size
<< CFG_EXT_BLK_SIZE_SHIFT
);
1754 nand_writereg(ctrl
, cfg_ext_offs
, tmp
);
1757 tmp
= nand_readreg(ctrl
, acc_control_offs
);
1758 tmp
&= ~brcmnand_ecc_level_mask(ctrl
);
1759 tmp
|= cfg
->ecc_level
<< NAND_ACC_CONTROL_ECC_SHIFT
;
1760 tmp
&= ~brcmnand_spare_area_mask(ctrl
);
1761 tmp
|= cfg
->spare_area_size
;
1762 nand_writereg(ctrl
, acc_control_offs
, tmp
);
1764 brcmnand_set_sector_size_1k(host
, cfg
->sector_size_1k
);
1766 /* threshold = ceil(BCH-level * 0.75) */
1767 brcmnand_wr_corr_thresh(host
, DIV_ROUND_UP(chip
->ecc
.strength
* 3, 4));
1772 static void brcmnand_print_cfg(char *buf
, struct brcmnand_cfg
*cfg
)
1775 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
1776 (unsigned long long)cfg
->device_size
>> 20,
1777 cfg
->block_size
>> 10,
1778 cfg
->page_size
>= 1024 ? cfg
->page_size
>> 10 : cfg
->page_size
,
1779 cfg
->page_size
>= 1024 ? "KiB" : "B",
1780 cfg
->spare_area_size
, cfg
->device_width
);
1782 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
1783 if (is_hamming_ecc(cfg
))
1784 sprintf(buf
, ", Hamming ECC");
1785 else if (cfg
->sector_size_1k
)
1786 sprintf(buf
, ", BCH-%u (1KiB sector)", cfg
->ecc_level
<< 1);
1788 sprintf(buf
, ", BCH-%u", cfg
->ecc_level
);
1792 * Minimum number of bytes to address a page. Calculated as:
1793 * roundup(log2(size / page-size) / 8)
1795 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
1796 * OK because many other things will break if 'size' is irregular...
1798 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
1800 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
1803 static int brcmnand_setup_dev(struct brcmnand_host
*host
)
1805 struct mtd_info
*mtd
= &host
->mtd
;
1806 struct nand_chip
*chip
= &host
->chip
;
1807 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1808 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
1810 u32 offs
, tmp
, oob_sector
;
1813 memset(cfg
, 0, sizeof(*cfg
));
1815 ret
= of_property_read_u32(nand_get_flash_node(chip
),
1816 "brcm,nand-oob-sector-size",
1819 /* Use detected size */
1820 cfg
->spare_area_size
= mtd
->oobsize
/
1821 (mtd
->writesize
>> FC_SHIFT
);
1823 cfg
->spare_area_size
= oob_sector
;
1825 if (cfg
->spare_area_size
> ctrl
->max_oob
)
1826 cfg
->spare_area_size
= ctrl
->max_oob
;
1828 * Set oobsize to be consistent with controller's spare_area_size, as
1829 * the rest is inaccessible.
1831 mtd
->oobsize
= cfg
->spare_area_size
* (mtd
->writesize
>> FC_SHIFT
);
1833 cfg
->device_size
= mtd
->size
;
1834 cfg
->block_size
= mtd
->erasesize
;
1835 cfg
->page_size
= mtd
->writesize
;
1836 cfg
->device_width
= (chip
->options
& NAND_BUSWIDTH_16
) ? 16 : 8;
1837 cfg
->col_adr_bytes
= 2;
1838 cfg
->blk_adr_bytes
= get_blk_adr_bytes(mtd
->size
, mtd
->writesize
);
1840 switch (chip
->ecc
.size
) {
1842 if (chip
->ecc
.strength
== 1) /* Hamming */
1843 cfg
->ecc_level
= 15;
1845 cfg
->ecc_level
= chip
->ecc
.strength
;
1846 cfg
->sector_size_1k
= 0;
1849 if (!(ctrl
->features
& BRCMNAND_HAS_1K_SECTORS
)) {
1850 dev_err(ctrl
->dev
, "1KB sectors not supported\n");
1853 if (chip
->ecc
.strength
& 0x1) {
1855 "odd ECC not supported with 1KB sectors\n");
1859 cfg
->ecc_level
= chip
->ecc
.strength
>> 1;
1860 cfg
->sector_size_1k
= 1;
1863 dev_err(ctrl
->dev
, "unsupported ECC size: %d\n",
1868 cfg
->ful_adr_bytes
= cfg
->blk_adr_bytes
;
1869 if (mtd
->writesize
> 512)
1870 cfg
->ful_adr_bytes
+= cfg
->col_adr_bytes
;
1872 cfg
->ful_adr_bytes
+= 1;
1874 ret
= brcmnand_set_cfg(host
, cfg
);
1878 brcmnand_set_ecc_enabled(host
, 1);
1880 brcmnand_print_cfg(msg
, cfg
);
1881 dev_info(ctrl
->dev
, "detected %s\n", msg
);
1883 /* Configure ACC_CONTROL */
1884 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
1885 tmp
= nand_readreg(ctrl
, offs
);
1886 tmp
&= ~ACC_CONTROL_PARTIAL_PAGE
;
1887 tmp
&= ~ACC_CONTROL_RD_ERASED
;
1888 tmp
&= ~ACC_CONTROL_FAST_PGM_RDIN
;
1889 if (ctrl
->features
& BRCMNAND_HAS_PREFETCH
) {
1891 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
1894 if (has_flash_dma(ctrl
))
1895 tmp
&= ~ACC_CONTROL_PREFETCH
;
1897 tmp
|= ACC_CONTROL_PREFETCH
;
1899 nand_writereg(ctrl
, offs
, tmp
);
1904 static int brcmnand_init_cs(struct brcmnand_host
*host
, struct device_node
*dn
)
1906 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1907 struct platform_device
*pdev
= host
->pdev
;
1908 struct mtd_info
*mtd
;
1909 struct nand_chip
*chip
;
1913 ret
= of_property_read_u32(dn
, "reg", &host
->cs
);
1915 dev_err(&pdev
->dev
, "can't get chip-select\n");
1922 nand_set_flash_node(chip
, dn
);
1925 mtd
->name
= devm_kasprintf(&pdev
->dev
, GFP_KERNEL
, "brcmnand.%d",
1927 mtd
->owner
= THIS_MODULE
;
1928 mtd
->dev
.parent
= &pdev
->dev
;
1930 chip
->IO_ADDR_R
= (void __iomem
*)0xdeadbeef;
1931 chip
->IO_ADDR_W
= (void __iomem
*)0xdeadbeef;
1933 chip
->cmd_ctrl
= brcmnand_cmd_ctrl
;
1934 chip
->cmdfunc
= brcmnand_cmdfunc
;
1935 chip
->waitfunc
= brcmnand_waitfunc
;
1936 chip
->read_byte
= brcmnand_read_byte
;
1937 chip
->read_buf
= brcmnand_read_buf
;
1938 chip
->write_buf
= brcmnand_write_buf
;
1940 chip
->ecc
.mode
= NAND_ECC_HW
;
1941 chip
->ecc
.read_page
= brcmnand_read_page
;
1942 chip
->ecc
.write_page
= brcmnand_write_page
;
1943 chip
->ecc
.read_page_raw
= brcmnand_read_page_raw
;
1944 chip
->ecc
.write_page_raw
= brcmnand_write_page_raw
;
1945 chip
->ecc
.write_oob_raw
= brcmnand_write_oob_raw
;
1946 chip
->ecc
.read_oob_raw
= brcmnand_read_oob_raw
;
1947 chip
->ecc
.read_oob
= brcmnand_read_oob
;
1948 chip
->ecc
.write_oob
= brcmnand_write_oob
;
1950 chip
->controller
= &ctrl
->controller
;
1953 * The bootloader might have configured 16bit mode but
1954 * NAND READID command only works in 8bit mode. We force
1955 * 8bit mode here to ensure that NAND READID commands works.
1957 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1958 nand_writereg(ctrl
, cfg_offs
,
1959 nand_readreg(ctrl
, cfg_offs
) & ~CFG_BUS_WIDTH
);
1961 if (nand_scan_ident(mtd
, 1, NULL
))
1964 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1966 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
1967 * to/from, and have nand_base pass us a bounce buffer instead, as
1970 chip
->options
|= NAND_USE_BOUNCE_BUFFER
;
1972 if (of_get_nand_on_flash_bbt(dn
))
1973 chip
->bbt_options
|= NAND_BBT_USE_FLASH
| NAND_BBT_NO_OOB
;
1975 if (brcmnand_setup_dev(host
))
1978 chip
->ecc
.size
= host
->hwcfg
.sector_size_1k
? 1024 : 512;
1979 /* only use our internal HW threshold */
1980 mtd
->bitflip_threshold
= 1;
1982 chip
->ecc
.layout
= brcmstb_choose_ecc_layout(host
);
1983 if (!chip
->ecc
.layout
)
1986 if (nand_scan_tail(mtd
))
1989 return mtd_device_register(mtd
, NULL
, 0);
1992 static void brcmnand_save_restore_cs_config(struct brcmnand_host
*host
,
1995 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1996 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1997 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1998 BRCMNAND_CS_CFG_EXT
);
1999 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
2000 BRCMNAND_CS_ACC_CONTROL
);
2001 u16 t1_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING1
);
2002 u16 t2_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING2
);
2005 nand_writereg(ctrl
, cfg_offs
, host
->hwcfg
.config
);
2006 if (cfg_offs
!= cfg_ext_offs
)
2007 nand_writereg(ctrl
, cfg_ext_offs
,
2008 host
->hwcfg
.config_ext
);
2009 nand_writereg(ctrl
, acc_control_offs
, host
->hwcfg
.acc_control
);
2010 nand_writereg(ctrl
, t1_offs
, host
->hwcfg
.timing_1
);
2011 nand_writereg(ctrl
, t2_offs
, host
->hwcfg
.timing_2
);
2013 host
->hwcfg
.config
= nand_readreg(ctrl
, cfg_offs
);
2014 if (cfg_offs
!= cfg_ext_offs
)
2015 host
->hwcfg
.config_ext
=
2016 nand_readreg(ctrl
, cfg_ext_offs
);
2017 host
->hwcfg
.acc_control
= nand_readreg(ctrl
, acc_control_offs
);
2018 host
->hwcfg
.timing_1
= nand_readreg(ctrl
, t1_offs
);
2019 host
->hwcfg
.timing_2
= nand_readreg(ctrl
, t2_offs
);
2023 static int brcmnand_suspend(struct device
*dev
)
2025 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2026 struct brcmnand_host
*host
;
2028 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2029 brcmnand_save_restore_cs_config(host
, 0);
2031 ctrl
->nand_cs_nand_select
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_SELECT
);
2032 ctrl
->nand_cs_nand_xor
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_XOR
);
2033 ctrl
->corr_stat_threshold
=
2034 brcmnand_read_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
);
2036 if (has_flash_dma(ctrl
))
2037 ctrl
->flash_dma_mode
= flash_dma_readl(ctrl
, FLASH_DMA_MODE
);
2042 static int brcmnand_resume(struct device
*dev
)
2044 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2045 struct brcmnand_host
*host
;
2047 if (has_flash_dma(ctrl
)) {
2048 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, ctrl
->flash_dma_mode
);
2049 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2052 brcmnand_write_reg(ctrl
, BRCMNAND_CS_SELECT
, ctrl
->nand_cs_nand_select
);
2053 brcmnand_write_reg(ctrl
, BRCMNAND_CS_XOR
, ctrl
->nand_cs_nand_xor
);
2054 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
,
2055 ctrl
->corr_stat_threshold
);
2057 /* Clear/re-enable interrupt */
2058 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2059 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2062 list_for_each_entry(host
, &ctrl
->host_list
, node
) {
2063 struct mtd_info
*mtd
= &host
->mtd
;
2064 struct nand_chip
*chip
= mtd
->priv
;
2066 brcmnand_save_restore_cs_config(host
, 1);
2068 /* Reset the chip, required by some chips after power-up */
2069 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, -1, -1);
2075 const struct dev_pm_ops brcmnand_pm_ops
= {
2076 .suspend
= brcmnand_suspend
,
2077 .resume
= brcmnand_resume
,
2079 EXPORT_SYMBOL_GPL(brcmnand_pm_ops
);
2081 static const struct of_device_id brcmnand_of_match
[] = {
2082 { .compatible
= "brcm,brcmnand-v4.0" },
2083 { .compatible
= "brcm,brcmnand-v5.0" },
2084 { .compatible
= "brcm,brcmnand-v6.0" },
2085 { .compatible
= "brcm,brcmnand-v6.1" },
2086 { .compatible
= "brcm,brcmnand-v7.0" },
2087 { .compatible
= "brcm,brcmnand-v7.1" },
2090 MODULE_DEVICE_TABLE(of
, brcmnand_of_match
);
2092 /***********************************************************************
2093 * Platform driver setup (per controller)
2094 ***********************************************************************/
2096 int brcmnand_probe(struct platform_device
*pdev
, struct brcmnand_soc
*soc
)
2098 struct device
*dev
= &pdev
->dev
;
2099 struct device_node
*dn
= dev
->of_node
, *child
;
2100 struct brcmnand_controller
*ctrl
;
2101 struct resource
*res
;
2104 /* We only support device-tree instantiation */
2108 if (!of_match_node(brcmnand_of_match
, dn
))
2111 ctrl
= devm_kzalloc(dev
, sizeof(*ctrl
), GFP_KERNEL
);
2115 dev_set_drvdata(dev
, ctrl
);
2118 init_completion(&ctrl
->done
);
2119 init_completion(&ctrl
->dma_done
);
2120 spin_lock_init(&ctrl
->controller
.lock
);
2121 init_waitqueue_head(&ctrl
->controller
.wq
);
2122 INIT_LIST_HEAD(&ctrl
->host_list
);
2124 /* NAND register range */
2125 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2126 ctrl
->nand_base
= devm_ioremap_resource(dev
, res
);
2127 if (IS_ERR(ctrl
->nand_base
))
2128 return PTR_ERR(ctrl
->nand_base
);
2130 /* Initialize NAND revision */
2131 ret
= brcmnand_revision_init(ctrl
);
2136 * Most chips have this cache at a fixed offset within 'nand' block.
2137 * Some must specify this region separately.
2139 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nand-cache");
2141 ctrl
->nand_fc
= devm_ioremap_resource(dev
, res
);
2142 if (IS_ERR(ctrl
->nand_fc
))
2143 return PTR_ERR(ctrl
->nand_fc
);
2145 ctrl
->nand_fc
= ctrl
->nand_base
+
2146 ctrl
->reg_offsets
[BRCMNAND_FC_BASE
];
2150 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "flash-dma");
2152 ctrl
->flash_dma_base
= devm_ioremap_resource(dev
, res
);
2153 if (IS_ERR(ctrl
->flash_dma_base
))
2154 return PTR_ERR(ctrl
->flash_dma_base
);
2156 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, 1); /* linked-list */
2157 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2159 /* Allocate descriptor(s) */
2160 ctrl
->dma_desc
= dmam_alloc_coherent(dev
,
2161 sizeof(*ctrl
->dma_desc
),
2162 &ctrl
->dma_pa
, GFP_KERNEL
);
2163 if (!ctrl
->dma_desc
)
2166 ctrl
->dma_irq
= platform_get_irq(pdev
, 1);
2167 if ((int)ctrl
->dma_irq
< 0) {
2168 dev_err(dev
, "missing FLASH_DMA IRQ\n");
2172 ret
= devm_request_irq(dev
, ctrl
->dma_irq
,
2173 brcmnand_dma_irq
, 0, DRV_NAME
,
2176 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2177 ctrl
->dma_irq
, ret
);
2181 dev_info(dev
, "enabling FLASH_DMA\n");
2184 /* Disable automatic device ID config, direct addressing */
2185 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
,
2186 CS_SELECT_AUTO_DEVICE_ID_CFG
| 0xff, 0, 0);
2187 /* Disable XOR addressing */
2188 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_XOR
, 0xff, 0, 0);
2190 if (ctrl
->features
& BRCMNAND_HAS_WP
) {
2191 /* Permanently disable write protection */
2193 brcmnand_set_wp(ctrl
, false);
2199 ctrl
->irq
= platform_get_irq(pdev
, 0);
2200 if ((int)ctrl
->irq
< 0) {
2201 dev_err(dev
, "no IRQ defined\n");
2206 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2212 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_irq
, 0,
2215 /* Enable interrupt */
2216 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2217 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2219 /* Use standard interrupt infrastructure */
2220 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_ctlrdy_irq
, 0,
2224 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2229 for_each_available_child_of_node(dn
, child
) {
2230 if (of_device_is_compatible(child
, "brcm,nandcs")) {
2231 struct brcmnand_host
*host
;
2233 host
= devm_kzalloc(dev
, sizeof(*host
), GFP_KERNEL
);
2241 ret
= brcmnand_init_cs(host
, child
);
2243 devm_kfree(dev
, host
);
2244 continue; /* Try all chip-selects */
2247 list_add_tail(&host
->node
, &ctrl
->host_list
);
2251 /* No chip-selects could initialize properly */
2252 if (list_empty(&ctrl
->host_list
))
2257 EXPORT_SYMBOL_GPL(brcmnand_probe
);
2259 int brcmnand_remove(struct platform_device
*pdev
)
2261 struct brcmnand_controller
*ctrl
= dev_get_drvdata(&pdev
->dev
);
2262 struct brcmnand_host
*host
;
2264 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2265 nand_release(&host
->mtd
);
2267 dev_set_drvdata(&pdev
->dev
, NULL
);
2271 EXPORT_SYMBOL_GPL(brcmnand_remove
);
2273 MODULE_LICENSE("GPL v2");
2274 MODULE_AUTHOR("Kevin Cernekee");
2275 MODULE_AUTHOR("Brian Norris");
2276 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2277 MODULE_ALIAS("platform:brcmnand");