2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/interrupt.h>
21 #include <linux/delay.h>
22 #include <linux/wait.h>
23 #include <linux/mutex.h>
24 #include <linux/pci.h>
25 #include <linux/mtd/mtd.h>
26 #include <linux/module.h>
MODULE_LICENSE("GPL");

/* We define a module parameter that allows the user to override
 * the hardware and decide what timing mode should be used.
 */
#define NAND_DEFAULT_TIMINGS	-1

/* -1 means "trust what the controller/device negotiated" */
static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
			" -1 indicates use default timings");

/* driver name used for registration and in log messages */
#define DENALI_NAND_NAME    "denali-nand"
/* We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience.  (All bits are from the bank-0
 * status register; the four banks use the same bit layout.) */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)
/* indicates whether or not the internal value for the flash bank is
 * valid */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1
/* This macro divides two integers and rounds fractional values up
 * to the nearest integer value.
 *
 * Rewritten in the standard DIV_ROUND_UP form: a single branch-free
 * expression that evaluates X once and Y twice (the original
 * modulo/ternary form evaluated X twice and Y three times).  Every
 * call site in this driver passes non-negative timing values, for
 * which the two forms are identical. */
#define CEIL_DIV(X, Y) (((X) + (Y) - 1) / (Y))
/* this macro allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/* These constants are defined by the driver to enable common driver
   configuration options. */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

#define DENALI_WRITE		0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
/* NOTE(review): an ADDR_CYCLE definition (value 1) appears to be
 * missing from this chunk of the file — confirm against the full
 * source. */
#define STATUS_CYCLE	2

/* this is a helper macro that allows us to
 * format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
90 /* List of platforms this NAND controller has be integrated into */
91 static const struct pci_device_id denali_pci_ids
[] = {
92 { PCI_VDEVICE(INTEL
, 0x0701), INTEL_CE4100
},
93 { PCI_VDEVICE(INTEL
, 0x0809), INTEL_MRST
},
94 { /* end: all zeroes */ }
98 /* these are static lookup tables that give us easy access to
99 registers in the NAND controller.
101 static const uint32_t intr_status_addresses
[4] = {INTR_STATUS0
,
106 static const uint32_t device_reset_banks
[4] = {DEVICE_RESET__BANK0
,
109 DEVICE_RESET__BANK3
};
/* per-bank TIME_OUT interrupt-status bits, indexed by flash_bank */
static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
					      INTR_STATUS1__TIME_OUT,
					      INTR_STATUS2__TIME_OUT,
					      INTR_STATUS3__TIME_OUT};

/* per-bank RST_COMP (reset complete) bits, indexed by flash_bank */
static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
					   INTR_STATUS1__RST_COMP,
					   INTR_STATUS2__RST_COMP,
					   INTR_STATUS3__RST_COMP};
/* specifies the debug level of the driver (consumed by nand_dbg_print) */
static int nand_debug_level;
/* forward declarations.
 * NOTE(review): the second-parameter lines of wait_for_irq() and
 * denali_irq_enable() were lost in this chunk; they are restored here
 * from their call sites (wait_for_irq(denali, irq_mask) and
 * denali_irq_enable(denali, int_mask)). */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
							uint32_t irq_mask);
static void denali_irq_enable(struct denali_nand_info *denali,
							uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);

/* set non-zero to compile in the ISR/register-write debug logging */
#define DEBUG_DENALI 0
/* This is a wrapper for writing to the denali registers.
 * this allows us to create debug information so we can
 * observe how the driver is programming the device.
 * it uses standard linux convention for (val, addr).
 * NOTE(review): braces and the DEBUG_DENALI conditional framing were
 * missing from this chunk and have been restored. */
static void denali_write32(uint32_t value, void *addr)
{
	iowrite32(value, addr);

#if DEBUG_DENALI
	/* log only the low address bits to keep the trace compact */
	printk(KERN_INFO "wrote: 0x%x -> 0x%x\n", value,
			(uint32_t)((uint32_t)addr & 0x1fff));
#endif
}
148 /* Certain operations for the denali NAND controller use
149 * an indexed mode to read/write data. The operation is
150 * performed by writing the address value of the command
151 * to the device memory followed by the data. This function
152 * abstracts this common operation.
154 static void index_addr(struct denali_nand_info
*denali
,
155 uint32_t address
, uint32_t data
)
157 denali_write32(address
, denali
->flash_mem
);
158 denali_write32(data
, denali
->flash_mem
+ 0x10);
161 /* Perform an indexed read of the device */
162 static void index_addr_read_data(struct denali_nand_info
*denali
,
163 uint32_t address
, uint32_t *pdata
)
165 denali_write32(address
, denali
->flash_mem
);
166 *pdata
= ioread32(denali
->flash_mem
+ 0x10);
169 /* We need to buffer some data for some of the NAND core routines.
170 * The operations manage buffering that data. */
171 static void reset_buf(struct denali_nand_info
*denali
)
173 denali
->buf
.head
= denali
->buf
.tail
= 0;
176 static void write_byte_to_buf(struct denali_nand_info
*denali
, uint8_t byte
)
178 BUG_ON(denali
->buf
.tail
>= sizeof(denali
->buf
.buf
));
179 denali
->buf
.buf
[denali
->buf
.tail
++] = byte
;
182 /* reads the status of the device */
183 static void read_status(struct denali_nand_info
*denali
)
187 /* initialize the data buffer to store status */
190 /* initiate a device status read */
191 cmd
= MODE_11
| BANK(denali
->flash_bank
);
192 index_addr(denali
, cmd
| COMMAND_CYCLE
, 0x70);
193 denali_write32(cmd
| STATUS_CYCLE
, denali
->flash_mem
);
195 /* update buffer with status value */
196 write_byte_to_buf(denali
, ioread32(denali
->flash_mem
+ 0x10));
199 printk(KERN_INFO
"device reporting status value of 0x%2x\n",
204 /* resets a specific device connected to the core */
205 static void reset_bank(struct denali_nand_info
*denali
)
207 uint32_t irq_status
= 0;
208 uint32_t irq_mask
= reset_complete
[denali
->flash_bank
] |
209 operation_timeout
[denali
->flash_bank
];
212 clear_interrupts(denali
);
214 bank
= device_reset_banks
[denali
->flash_bank
];
215 denali_write32(bank
, denali
->flash_reg
+ DEVICE_RESET
);
217 irq_status
= wait_for_irq(denali
, irq_mask
);
219 if (irq_status
& operation_timeout
[denali
->flash_bank
])
220 printk(KERN_ERR
"reset bank failed.\n");
/* Reset the flash controller */
/* NOTE(review): this chunk is missing the function's opening brace,
 * the declaration of the loop counter 'i', the body/terminators of the
 * polling loop, and the final return value — reconcile with the full
 * driver source before building. */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

/* acknowledge any stale completion/timeout status before resetting */
for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
	denali_write32(reset_complete[i] | operation_timeout[i],
	denali->flash_reg + intr_status_addresses[i]);

/* reset each bank and spin until it reports completion or timeout */
for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
	denali_write32(device_reset_banks[i],
	denali->flash_reg + DEVICE_RESET);
	while (!(ioread32(denali->flash_reg +
	intr_status_addresses[i]) &
	(reset_complete[i] | operation_timeout[i])))
	if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
	operation_timeout[i])
	nand_dbg_print(NAND_DBG_WARN,
	"NAND Reset operation timed out on bank %d\n", i);

/* clear the status bits the resets just raised */
for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
	denali_write32(reset_complete[i] | operation_timeout[i],
	denali->flash_reg + intr_status_addresses[i]);
/* this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 *
 * NOTE(review): this chunk is heavily truncated — the opening brace,
 * the 'acc_clks' declaration, several loop bodies/increments, the
 * assignment target of the data_invalid min() expression, and the
 * closing brace are all missing.  The surviving tokens are preserved
 * verbatim below; reconcile with the full driver source. */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
/* per-mode ONFI timing parameters (ns), indexed by timing mode 0..5 */
uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};

uint16_t TclsRising = 1;
uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
uint16_t dv_window = 0;
uint16_t en_lo, en_hi;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
	__FILE__, __LINE__, __func__);

/* read-enable low/high phase counts in controller clocks */
en_lo = CEIL_DIV(Trp[mode], CLK_X);
en_hi = CEIL_DIV(Treh[mode], CLK_X);
if ((en_hi * CLK_X) < (Treh[mode] + 2))
if ((en_lo + en_hi) * CLK_X < Trc[mode])
en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
if ((en_lo + en_hi) < CLK_MULTI)
en_lo += CLK_MULTI - en_lo - en_hi;

/* widen the phases until the data-valid window is at least 8 ns */
while (dv_window < 8) {
data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
dv_window = data_invalid - Trea[mode];
acc_clks = CEIL_DIV(Trea[mode], CLK_X);
while (((acc_clks * CLK_X) - Trea[mode]) < 3)
if ((data_invalid - acc_clks * CLK_X) < 2)
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",

/* derive the remaining inter-operation gap counters */
addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])

/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
(ioread32(denali->flash_reg + DEVICE_ID) == 0x88))

/* program the computed timing values into the controller */
denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
/* configures the initial ECC settings for the controller */
/* NOTE(review): this chunk is missing the opening brace, several
 * #if/#endif or if/else framing lines between the two
 * wECCBytesPerSector computations, and the closing brace — reconcile
 * with the full driver source.  Visible tokens preserved verbatim. */
static void set_ecc_config(struct denali_nand_info *denali)
/* small-page / small-spare devices fall back to 8-bit correction */
if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
	(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
	denali_write32(8, denali->flash_reg + ECC_CORRECTION);

if ((ioread32(denali->flash_reg + ECC_CORRECTION) &
	ECC_CORRECTION__VALUE) == 1) {
	/* 1-bit correction: 4 ECC bytes per sector, per device */
	denali->dev_info.wECCBytesPerSector = 4;
	denali->dev_info.wECCBytesPerSector *=
		denali->dev_info.wDevicesConnected;
	denali->dev_info.wNumPageSpareFlag =
		denali->dev_info.wPageSpareSize -
		denali->dev_info.wPageDataSize /
		(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
		denali->dev_info.wECCBytesPerSector
		- denali->dev_info.wSpareSkipBytes;
	/* multi-bit correction: 13 bits of parity per corrected bit,
	 * rounded to bytes, then padded to an even count */
	denali->dev_info.wECCBytesPerSector =
		(ioread32(denali->flash_reg + ECC_CORRECTION) &
		ECC_CORRECTION__VALUE) * 13 / 8;
	if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
		denali->dev_info.wECCBytesPerSector += 2;
		denali->dev_info.wECCBytesPerSector += 1;
	denali->dev_info.wECCBytesPerSector *=
		denali->dev_info.wDevicesConnected;
	denali->dev_info.wNumPageSpareFlag =
		denali->dev_info.wPageSpareSize -
		denali->dev_info.wPageDataSize /
		(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
		denali->dev_info.wECCBytesPerSector
		- denali->dev_info.wSpareSkipBytes;
/* queries the NAND device to see what ONFI modes it supports. */
/* NOTE(review): heavily truncated chunk — opening brace, loop-counter
 * declaration, several loop bodies, register-name fragments inside the
 * bank-3 polling loop, the closing braces and the return value are
 * missing.  Visible tokens preserved verbatim; reconcile with the full
 * driver source. */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
uint16_t blks_lun_l, blks_lun_h, n_of_luns;
uint32_t blockperlun, id;
/* reset bank 0 and poll for reset-complete or timeout */
denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
	INTR_STATUS0__RST_COMP) |
	(ioread32(denali->flash_reg + INTR_STATUS0) &
	INTR_STATUS0__TIME_OUT)))
/* each subsequent bank is only reset if the previous one succeeded */
if (ioread32(denali->flash_reg + INTR_STATUS0) &
	INTR_STATUS0__RST_COMP) {
	denali_write32(DEVICE_RESET__BANK1,
		denali->flash_reg + DEVICE_RESET);
	while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
		INTR_STATUS1__RST_COMP) |
		(ioread32(denali->flash_reg + INTR_STATUS1) &
		INTR_STATUS1__TIME_OUT)))
	if (ioread32(denali->flash_reg + INTR_STATUS1) &
		INTR_STATUS1__RST_COMP) {
		denali_write32(DEVICE_RESET__BANK2,
			denali->flash_reg + DEVICE_RESET);
		while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
			INTR_STATUS2__RST_COMP) |
			(ioread32(denali->flash_reg + INTR_STATUS2) &
			INTR_STATUS2__TIME_OUT)))
		if (ioread32(denali->flash_reg + INTR_STATUS2) &
			INTR_STATUS2__RST_COMP) {
			denali_write32(DEVICE_RESET__BANK3,
				denali->flash_reg + DEVICE_RESET);
			while (!((ioread32(denali->flash_reg +
				INTR_STATUS3__RST_COMP) |
				(ioread32(denali->flash_reg +
				INTR_STATUS3__TIME_OUT)))
			printk(KERN_ERR "Getting a time out for bank 2!\n");
		printk(KERN_ERR "Getting a time out for bank 1!\n");
/* acknowledge any timeout bits left behind by the resets */
denali_write32(INTR_STATUS0__TIME_OUT,
	denali->flash_reg + INTR_STATUS0);
denali_write32(INTR_STATUS1__TIME_OUT,
	denali->flash_reg + INTR_STATUS1);
denali_write32(INTR_STATUS2__TIME_OUT,
	denali->flash_reg + INTR_STATUS2);
denali_write32(INTR_STATUS3__TIME_OUT,
	denali->flash_reg + INTR_STATUS3);
/* capture the ONFI capability registers into the device context */
denali->dev_info.wONFIDevFeatures =
	ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
denali->dev_info.wONFIOptCommands =
	ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
denali->dev_info.wONFITimingMode =
	ioread32(denali->flash_reg + ONFI_TIMING_MODE);
denali->dev_info.wONFIPgmCacheTimingMode =
	ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
/* total blocks = LUN count times blocks-per-LUN (split hi/lo regs) */
n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
	ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
blks_lun_l = ioread32(denali->flash_reg +
	ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
blks_lun_h = ioread32(denali->flash_reg +
	ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
blockperlun = (blks_lun_h << 16) | blks_lun_l;
denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
	ONFI_TIMING_MODE__VALUE))
/* pick the fastest timing mode the device advertises */
for (i = 5; i > 0; i--) {
	if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
nand_onfi_timing_set(denali, i);
/* read the ID bytes; the 3rd ID byte encodes the MLC flag */
index_addr(denali, MODE_11 | 0, 0x90);
index_addr(denali, MODE_11 | 1, 0);
for (i = 0; i < 3; i++)
	index_addr_read_data(denali, MODE_11 | 2, &id);
nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
denali->dev_info.MLCDevice = id & 0x0C;
/* By now, all the ONFI devices we know support the page cache */
/* rw feature. So here we enable the pipeline_rw_ahead feature */
/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
/* derive device geometry for Samsung NAND from its raw ID bytes. */
/* NOTE(review): truncated chunk — opening brace, loop-counter and
 * blk_size declarations, the &id_bytes[i] argument of the ID-read
 * call, the tail of the blk_size expression and the closing braces
 * are missing.  Visible tokens preserved verbatim. */
static void get_samsung_nand_para(struct denali_nand_info *denali)
uint8_t no_of_planes;
uint64_t plane_size, capacity;
uint32_t id_bytes[5];
/* issue READ ID (0x90, address 0) and pull five ID bytes */
index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
for (i = 0; i < 5; i++)
	index_addr_read_data(denali, (uint32_t)(MODE_11 | 2),
nand_dbg_print(NAND_DBG_DEBUG,
	"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
	id_bytes[0], id_bytes[1], id_bytes[2],
	id_bytes[3], id_bytes[4]);
if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
	/* Set timing register values according to datasheet */
	denali_write32(5, denali->flash_reg + ACC_CLKS);
	denali_write32(20, denali->flash_reg + RE_2_WE);
	denali_write32(12, denali->flash_reg + WE_2_RE);
	denali_write32(14, denali->flash_reg + ADDR_2_DATA);
	denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
	denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
	denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
/* decode plane count and plane size from the 5th ID byte */
no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) &
capacity = (uint64_t)128 * plane_size * no_of_planes;
do_div(capacity, blk_size);
denali->dev_info.wTotalBlocks = capacity;
/* apply Toshiba-specific fixups to the controller's reported geometry. */
/* NOTE(review): truncated chunk — opening brace, 'tmp' declaration,
 * the denali_write32(tmp, ...) line feeding LOGICAL_PAGE_SPARE_SIZE,
 * the '#if SUPPORT_15BITECC' opener, '#endif' and closing braces are
 * missing.  Visible tokens preserved verbatim. */
static void get_toshiba_nand_para(struct denali_nand_info *denali)
/* Workaround to fix a controller bug which reports a wrong */
/* spare area size for some kind of Toshiba NAND device */
if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
	(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
	denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
	tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
		ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
	denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
	denali_write32(8, denali->flash_reg + ECC_CORRECTION);
/* program known-good geometry for recognized Hynix parts, keyed on the
 * device ID byte. */
/* NOTE(review): truncated chunk — opening brace, the
 * 'switch (device_id) {' line, the main_size/spare_size assignment
 * heads, the '#if SUPPORT_15BITECC' opener, '#endif', 'break's,
 * 'default:' and closing braces are missing.  Visible tokens
 * preserved verbatim. */
static void get_hynix_nand_para(struct denali_nand_info *denali,
uint32_t main_size, spare_size;
case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
	denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
	denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		ioread32(denali->flash_reg + DEVICES_CONNECTED);
		ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali_write32(main_size,
		denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
	denali_write32(spare_size,
		denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
	denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
	denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
	denali_write32(8, denali->flash_reg + ECC_CORRECTION);
	denali->dev_info.MLCDevice = 1;
nand_dbg_print(NAND_DBG_WARN,
	"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
	"Will use default parameter values instead.\n",
	denali->dev_info.wDeviceID);
/* determines how many NAND chips are connected to the controller. Note for
   Intel CE4100 devices we don't support more than one device.
 */
/* NOTE(review): truncated chunk — opening brace, loop-counter
 * declaration, 'break;'s after the empty-ID and CE4100-error checks,
 * and the closing braces are missing.  Visible tokens preserved
 * verbatim. */
static void find_valid_banks(struct denali_nand_info *denali)
uint32_t id[LLD_MAX_FLASH_BANKS];
denali->total_used_banks = 1;
/* read the first ID byte from every bank; a bank counts as used when
 * its ID matches bank 0's */
for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
	index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
	index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
	index_addr_read_data(denali,
		(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
	nand_dbg_print(NAND_DBG_DEBUG,
		"Return 1st ID for bank[%d]: %x\n", i, id[i]);
	if (!(id[i] & 0x0ff))
	if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
		denali->total_used_banks++;
if (denali->platform == INTEL_CE4100) {
	/* Platform limitations of the CE4100 device limit
	 * users to a single chip solution for NAND.
	 * Multichip support is not enabled. */
	if (denali->total_used_banks != 1) {
		printk(KERN_ERR "Sorry, Intel CE4100 only supports "
			"a single NAND device.\n");
nand_dbg_print(NAND_DBG_DEBUG,
	"denali->total_used_banks: %d\n", denali->total_used_banks);
/* reads the Spectra partition registers (when the controller exposes the
 * partition feature) and fills in the start/end/data-block-count fields
 * of dev_info; otherwise falls back to the static SPECTRA_START_BLOCK
 * layout. */
/* NOTE(review): truncated chunk — opening brace, the '+' operators
 * joining the two halves of each start/end-block expression, the
 * else lines between the branches and the closing braces are
 * missing.  Visible tokens preserved verbatim. */
static void detect_partition_feature(struct denali_nand_info *denali)
if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
	if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
		PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
		denali->dev_info.wSpectraStartBlock =
			((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
			MIN_MAX_BANK_1__MIN_VALUE) *
			denali->dev_info.wTotalBlocks)
			(ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
			MIN_BLK_ADDR_1__VALUE);
		denali->dev_info.wSpectraEndBlock =
			(((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
			MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
			denali->dev_info.wTotalBlocks)
			(ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
			MAX_BLK_ADDR_1__VALUE);
		denali->dev_info.wTotalBlocks *=
			denali->total_used_banks;
		/* clamp the end block to the device's last block */
		if (denali->dev_info.wSpectraEndBlock >=
			denali->dev_info.wTotalBlocks) {
			denali->dev_info.wSpectraEndBlock =
				denali->dev_info.wTotalBlocks - 1;
		denali->dev_info.wDataBlockNum =
			denali->dev_info.wSpectraEndBlock -
			denali->dev_info.wSpectraStartBlock + 1;
		denali->dev_info.wTotalBlocks *=
			denali->total_used_banks;
		denali->dev_info.wSpectraStartBlock =
		denali->dev_info.wSpectraEndBlock =
			denali->dev_info.wTotalBlocks - 1;
		denali->dev_info.wDataBlockNum =
			denali->dev_info.wSpectraEndBlock -
			denali->dev_info.wSpectraStartBlock + 1;
	/* no partition feature: use the whole device from the static
	 * Spectra start block */
	denali->dev_info.wTotalBlocks *= denali->total_used_banks;
	denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
	denali->dev_info.wSpectraEndBlock =
		denali->dev_info.wTotalBlocks - 1;
	denali->dev_info.wDataBlockNum =
		denali->dev_info.wSpectraEndBlock -
		denali->dev_info.wSpectraStartBlock + 1;
705 static void dump_device_info(struct denali_nand_info
*denali
)
707 nand_dbg_print(NAND_DBG_DEBUG
, "denali->dev_info:\n");
708 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceMaker: 0x%x\n",
709 denali
->dev_info
.wDeviceMaker
);
710 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceID: 0x%x\n",
711 denali
->dev_info
.wDeviceID
);
712 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceType: 0x%x\n",
713 denali
->dev_info
.wDeviceType
);
714 nand_dbg_print(NAND_DBG_DEBUG
, "SpectraStartBlock: %d\n",
715 denali
->dev_info
.wSpectraStartBlock
);
716 nand_dbg_print(NAND_DBG_DEBUG
, "SpectraEndBlock: %d\n",
717 denali
->dev_info
.wSpectraEndBlock
);
718 nand_dbg_print(NAND_DBG_DEBUG
, "TotalBlocks: %d\n",
719 denali
->dev_info
.wTotalBlocks
);
720 nand_dbg_print(NAND_DBG_DEBUG
, "PagesPerBlock: %d\n",
721 denali
->dev_info
.wPagesPerBlock
);
722 nand_dbg_print(NAND_DBG_DEBUG
, "PageSize: %d\n",
723 denali
->dev_info
.wPageSize
);
724 nand_dbg_print(NAND_DBG_DEBUG
, "PageDataSize: %d\n",
725 denali
->dev_info
.wPageDataSize
);
726 nand_dbg_print(NAND_DBG_DEBUG
, "PageSpareSize: %d\n",
727 denali
->dev_info
.wPageSpareSize
);
728 nand_dbg_print(NAND_DBG_DEBUG
, "NumPageSpareFlag: %d\n",
729 denali
->dev_info
.wNumPageSpareFlag
);
730 nand_dbg_print(NAND_DBG_DEBUG
, "ECCBytesPerSector: %d\n",
731 denali
->dev_info
.wECCBytesPerSector
);
732 nand_dbg_print(NAND_DBG_DEBUG
, "BlockSize: %d\n",
733 denali
->dev_info
.wBlockSize
);
734 nand_dbg_print(NAND_DBG_DEBUG
, "BlockDataSize: %d\n",
735 denali
->dev_info
.wBlockDataSize
);
736 nand_dbg_print(NAND_DBG_DEBUG
, "DataBlockNum: %d\n",
737 denali
->dev_info
.wDataBlockNum
);
738 nand_dbg_print(NAND_DBG_DEBUG
, "PlaneNum: %d\n",
739 denali
->dev_info
.bPlaneNum
);
740 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceMainAreaSize: %d\n",
741 denali
->dev_info
.wDeviceMainAreaSize
);
742 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceSpareAreaSize: %d\n",
743 denali
->dev_info
.wDeviceSpareAreaSize
);
744 nand_dbg_print(NAND_DBG_DEBUG
, "DevicesConnected: %d\n",
745 denali
->dev_info
.wDevicesConnected
);
746 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceWidth: %d\n",
747 denali
->dev_info
.wDeviceWidth
);
748 nand_dbg_print(NAND_DBG_DEBUG
, "HWRevision: 0x%x\n",
749 denali
->dev_info
.wHWRevision
);
750 nand_dbg_print(NAND_DBG_DEBUG
, "HWFeatures: 0x%x\n",
751 denali
->dev_info
.wHWFeatures
);
752 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIDevFeatures: 0x%x\n",
753 denali
->dev_info
.wONFIDevFeatures
);
754 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIOptCommands: 0x%x\n",
755 denali
->dev_info
.wONFIOptCommands
);
756 nand_dbg_print(NAND_DBG_DEBUG
, "ONFITimingMode: 0x%x\n",
757 denali
->dev_info
.wONFITimingMode
);
758 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIPgmCacheTimingMode: 0x%x\n",
759 denali
->dev_info
.wONFIPgmCacheTimingMode
);
760 nand_dbg_print(NAND_DBG_DEBUG
, "MLCDevice: %s\n",
761 denali
->dev_info
.MLCDevice
? "Yes" : "No");
762 nand_dbg_print(NAND_DBG_DEBUG
, "SpareSkipBytes: %d\n",
763 denali
->dev_info
.wSpareSkipBytes
);
764 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInPageNumber: %d\n",
765 denali
->dev_info
.nBitsInPageNumber
);
766 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInPageDataSize: %d\n",
767 denali
->dev_info
.nBitsInPageDataSize
);
768 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInBlockDataSize: %d\n",
769 denali
->dev_info
.nBitsInBlockDataSize
);
/* identifies the attached NAND device (ONFI query or per-vendor probe),
 * fills in dev_info geometry from the controller registers, configures
 * ECC, counts banks and partitions, and applies any user timing
 * override. */
/* NOTE(review): truncated chunk — opening brace, the tail of the
 * "read id method" comment, the FAIL-handling line, the 'else' before
 * the default-blocks fallback, the switch-case bodies, the
 * 'return status;' and closing brace are missing.  Visible tokens
 * preserved verbatim. */
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint16_t status = PASS;
uint8_t no_of_planes;
uint32_t id_bytes[5], addr;
uint8_t i, maf_id, device_id;

nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
	__FILE__, __LINE__, __func__);

/* Use read id method to get device ID and other
 * params. For some NAND chips, controller can't
 * report the correct device ID by reading from
 * the registers — so issue READ ID directly. */
addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
index_addr(denali, (uint32_t)addr | 0, 0x90);
index_addr(denali, (uint32_t)addr | 1, 0);
for (i = 0; i < 5; i++)
	index_addr_read_data(denali, addr | 2, &id_bytes[i]);
maf_id = id_bytes[0];
device_id = id_bytes[1];

/* dispatch on ONFI support, then on manufacturer ID */
if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
	ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
	if (FAIL == get_onfi_nand_para(denali))
} else if (maf_id == 0xEC) { /* Samsung NAND */
	get_samsung_nand_para(denali);
} else if (maf_id == 0x98) { /* Toshiba NAND */
	get_toshiba_nand_para(denali);
} else if (maf_id == 0xAD) { /* Hynix NAND */
	get_hynix_nand_para(denali, device_id);
	denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;

nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
	"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
	"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
	"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
	ioread32(denali->flash_reg + ACC_CLKS),
	ioread32(denali->flash_reg + RE_2_WE),
	ioread32(denali->flash_reg + WE_2_RE),
	ioread32(denali->flash_reg + ADDR_2_DATA),
	ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
	ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
	ioread32(denali->flash_reg + CS_SETUP_CNT));

/* snapshot geometry from the controller registers */
denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);

denali->dev_info.wDeviceMainAreaSize =
	ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
denali->dev_info.wDeviceSpareAreaSize =
	ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

denali->dev_info.wPageDataSize =
	ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);

/* Note: When using the Micon 4K NAND device, the controller will report
 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
 * And if force set it to 218 bytes, the controller can not work
 * correctly. So just let it be. But keep in mind that this bug may
 * cause other problems in future. - Yunpeng 2008-10-10 */
denali->dev_info.wPageSpareSize =
	ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

denali->dev_info.wPagesPerBlock =
	ioread32(denali->flash_reg + PAGES_PER_BLOCK);

/* derived sizes */
denali->dev_info.wPageSize =
	denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
denali->dev_info.wBlockSize =
	denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
denali->dev_info.wBlockDataSize =
	denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;

denali->dev_info.wDeviceWidth =
	ioread32(denali->flash_reg + DEVICE_WIDTH);
denali->dev_info.wDeviceType =
	((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);

denali->dev_info.wDevicesConnected =
	ioread32(denali->flash_reg + DEVICES_CONNECTED);

denali->dev_info.wSpareSkipBytes =
	ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
	denali->dev_info.wDevicesConnected;

denali->dev_info.nBitsInPageNumber =
	ilog2(denali->dev_info.wPagesPerBlock);
denali->dev_info.nBitsInPageDataSize =
	ilog2(denali->dev_info.wPageDataSize);
denali->dev_info.nBitsInBlockDataSize =
	ilog2(denali->dev_info.wBlockDataSize);

set_ecc_config(denali);

no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
	NUMBER_OF_PLANES__VALUE;

switch (no_of_planes) {
denali->dev_info.bPlaneNum = no_of_planes + 1;

find_valid_banks(denali);

detect_partition_feature(denali);

dump_device_info(denali);

/* If the user specified to override the default timings
 * with a specific ONFI mode, we apply those changes here. */
if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
	nand_onfi_timing_set(denali, onfi_timing_mode);
903 static void denali_set_intr_modes(struct denali_nand_info
*denali
,
906 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
907 __FILE__
, __LINE__
, __func__
);
910 denali_write32(1, denali
->flash_reg
+ GLOBAL_INT_ENABLE
);
912 denali_write32(0, denali
->flash_reg
+ GLOBAL_INT_ENABLE
);
/* validation function to verify that the controlling software is making
 * a valid request: the controller supports exactly four chip selects,
 * so a usable bank index is 0..3 (CHIP_SELECT_INVALID is -1). */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return (flash_bank >= 0 && flash_bank < 4);
}
923 static void denali_irq_init(struct denali_nand_info
*denali
)
925 uint32_t int_mask
= 0;
927 /* Disable global interrupts */
928 denali_set_intr_modes(denali
, false);
930 int_mask
= DENALI_IRQ_ALL
;
932 /* Clear all status bits */
933 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS0
);
934 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS1
);
935 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS2
);
936 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS3
);
938 denali_irq_enable(denali
, int_mask
);
941 static void denali_irq_cleanup(int irqnum
, struct denali_nand_info
*denali
)
943 denali_set_intr_modes(denali
, false);
944 free_irq(irqnum
, denali
);
947 static void denali_irq_enable(struct denali_nand_info
*denali
,
950 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN0
);
951 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN1
);
952 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN2
);
953 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN3
);
956 /* This function only returns when an interrupt that this driver cares about
957 * occurs. This is to reduce the overhead of servicing interrupts
959 static inline uint32_t denali_irq_detected(struct denali_nand_info
*denali
)
961 return read_interrupt_status(denali
) & DENALI_IRQ_ALL
;
964 /* Interrupts are cleared by writing a 1 to the appropriate status bit */
965 static inline void clear_interrupt(struct denali_nand_info
*denali
,
968 uint32_t intr_status_reg
= 0;
970 intr_status_reg
= intr_status_addresses
[denali
->flash_bank
];
972 denali_write32(irq_mask
, denali
->flash_reg
+ intr_status_reg
);
975 static void clear_interrupts(struct denali_nand_info
*denali
)
977 uint32_t status
= 0x0;
978 spin_lock_irq(&denali
->irq_lock
);
980 status
= read_interrupt_status(denali
);
983 denali
->irq_debug_array
[denali
->idx
++] = 0x30000000 | status
;
987 denali
->irq_status
= 0x0;
988 spin_unlock_irq(&denali
->irq_lock
);
991 static uint32_t read_interrupt_status(struct denali_nand_info
*denali
)
993 uint32_t intr_status_reg
= 0;
995 intr_status_reg
= intr_status_addresses
[denali
->flash_bank
];
997 return ioread32(denali
->flash_reg
+ intr_status_reg
);
1001 static void print_irq_log(struct denali_nand_info
*denali
)
1005 printk(KERN_INFO
"ISR debug log index = %X\n", denali
->idx
);
1006 for (i
= 0; i
< 32; i
++)
1007 printk(KERN_INFO
"%08X: %08X\n", i
, denali
->irq_debug_array
[i
]);
1011 /* This is the interrupt service routine. It handles all interrupts
1012 * sent to this device. Note that on CE4100, this is a shared
1015 static irqreturn_t
denali_isr(int irq
, void *dev_id
)
1017 struct denali_nand_info
*denali
= dev_id
;
1018 uint32_t irq_status
= 0x0;
1019 irqreturn_t result
= IRQ_NONE
;
1021 spin_lock(&denali
->irq_lock
);
1023 /* check to see if a valid NAND chip has
1026 if (is_flash_bank_valid(denali
->flash_bank
)) {
1027 /* check to see if controller generated
1028 * the interrupt, since this is a shared interrupt */
1029 irq_status
= denali_irq_detected(denali
);
1030 if (irq_status
!= 0) {
1032 denali
->irq_debug_array
[denali
->idx
++] =
1033 0x10000000 | irq_status
;
1036 printk(KERN_INFO
"IRQ status = 0x%04x\n", irq_status
);
1038 /* handle interrupt */
1039 /* first acknowledge it */
1040 clear_interrupt(denali
, irq_status
);
1041 /* store the status in the device context for someone
1043 denali
->irq_status
|= irq_status
;
1044 /* notify anyone who cares that it happened */
1045 complete(&denali
->complete
);
1046 /* tell the OS that we've handled this */
1047 result
= IRQ_HANDLED
;
1050 spin_unlock(&denali
->irq_lock
);
1053 #define BANK(x) ((x) << 24)
1055 static uint32_t wait_for_irq(struct denali_nand_info
*denali
, uint32_t irq_mask
)
1057 unsigned long comp_res
= 0;
1058 uint32_t intr_status
= 0;
1060 unsigned long timeout
= msecs_to_jiffies(1000);
1064 printk(KERN_INFO
"waiting for 0x%x\n", irq_mask
);
1067 wait_for_completion_timeout(&denali
->complete
, timeout
);
1068 spin_lock_irq(&denali
->irq_lock
);
1069 intr_status
= denali
->irq_status
;
1072 denali
->irq_debug_array
[denali
->idx
++] =
1073 0x20000000 | (irq_mask
<< 16) | intr_status
;
1077 if (intr_status
& irq_mask
) {
1078 denali
->irq_status
&= ~irq_mask
;
1079 spin_unlock_irq(&denali
->irq_lock
);
1082 printk(KERN_INFO
"status on retry = 0x%x\n",
1085 /* our interrupt was detected */
1088 /* these are not the interrupts you are looking for -
1089 * need to wait again */
1090 spin_unlock_irq(&denali
->irq_lock
);
1092 print_irq_log(denali
);
1093 printk(KERN_INFO
"received irq nobody cared:"
1094 " irq_status = 0x%x, irq_mask = 0x%x,"
1095 " timeout = %ld\n", intr_status
,
1096 irq_mask
, comp_res
);
1100 } while (comp_res
!= 0);
1102 if (comp_res
== 0) {
1104 printk(KERN_ERR
"timeout occurred, status = 0x%x, mask = 0x%x\n",
1105 intr_status
, irq_mask
);
1112 /* This helper function setups the registers for ECC and whether or not
1113 the spare area will be transfered. */
1114 static void setup_ecc_for_xfer(struct denali_nand_info
*denali
, bool ecc_en
,
1115 bool transfer_spare
)
1117 int ecc_en_flag
= 0, transfer_spare_flag
= 0;
1119 /* set ECC, transfer spare bits if needed */
1120 ecc_en_flag
= ecc_en
? ECC_ENABLE__FLAG
: 0;
1121 transfer_spare_flag
= transfer_spare
? TRANSFER_SPARE_REG__FLAG
: 0;
1123 /* Enable spare area/ECC per user's request. */
1124 denali_write32(ecc_en_flag
, denali
->flash_reg
+ ECC_ENABLE
);
1125 denali_write32(transfer_spare_flag
,
1126 denali
->flash_reg
+ TRANSFER_SPARE_REG
);
1129 /* sends a pipeline command operation to the controller. See the Denali NAND
1130 controller's user guide for more information (section 4.2.3.6).
1132 static int denali_send_pipeline_cmd(struct denali_nand_info
*denali
,
1134 bool transfer_spare
,
1139 uint32_t addr
= 0x0, cmd
= 0x0, page_count
= 1, irq_status
= 0,
1142 if (op
== DENALI_READ
)
1143 irq_mask
= INTR_STATUS0__LOAD_COMP
;
1144 else if (op
== DENALI_WRITE
)
1149 setup_ecc_for_xfer(denali
, ecc_en
, transfer_spare
);
1152 spin_lock_irq(&denali
->irq_lock
);
1153 denali
->irq_debug_array
[denali
->idx
++] =
1154 0x40000000 | ioread32(denali
->flash_reg
+ ECC_ENABLE
) |
1157 spin_unlock_irq(&denali
->irq_lock
);
1161 /* clear interrupts */
1162 clear_interrupts(denali
);
1164 addr
= BANK(denali
->flash_bank
) | denali
->page
;
1166 if (op
== DENALI_WRITE
&& access_type
!= SPARE_ACCESS
) {
1167 cmd
= MODE_01
| addr
;
1168 denali_write32(cmd
, denali
->flash_mem
);
1169 } else if (op
== DENALI_WRITE
&& access_type
== SPARE_ACCESS
) {
1170 /* read spare area */
1171 cmd
= MODE_10
| addr
;
1172 index_addr(denali
, (uint32_t)cmd
, access_type
);
1174 cmd
= MODE_01
| addr
;
1175 denali_write32(cmd
, denali
->flash_mem
);
1176 } else if (op
== DENALI_READ
) {
1177 /* setup page read request for access type */
1178 cmd
= MODE_10
| addr
;
1179 index_addr(denali
, (uint32_t)cmd
, access_type
);
1181 /* page 33 of the NAND controller spec indicates we should not
1182 use the pipeline commands in Spare area only mode. So we
1185 if (access_type
== SPARE_ACCESS
) {
1186 cmd
= MODE_01
| addr
;
1187 denali_write32(cmd
, denali
->flash_mem
);
1189 index_addr(denali
, (uint32_t)cmd
,
1190 0x2000 | op
| page_count
);
1192 /* wait for command to be accepted
1193 * can always use status0 bit as the
1194 * mask is identical for each
1196 irq_status
= wait_for_irq(denali
, irq_mask
);
1198 if (irq_status
== 0) {
1199 printk(KERN_ERR
"cmd, page, addr on timeout "
1200 "(0x%x, 0x%x, 0x%x)\n", cmd
,
1201 denali
->page
, addr
);
1204 cmd
= MODE_01
| addr
;
1205 denali_write32(cmd
, denali
->flash_mem
);
1212 /* helper function that simply writes a buffer to the flash */
1213 static int write_data_to_flash_mem(struct denali_nand_info
*denali
,
1217 uint32_t i
= 0, *buf32
;
1219 /* verify that the len is a multiple of 4. see comment in
1220 * read_data_from_flash_mem() */
1221 BUG_ON((len
% 4) != 0);
1223 /* write the data to the flash memory */
1224 buf32
= (uint32_t *)buf
;
1225 for (i
= 0; i
< len
/ 4; i
++)
1226 denali_write32(*buf32
++, denali
->flash_mem
+ 0x10);
1227 return i
*4; /* intent is to return the number of bytes read */
1230 /* helper function that simply reads a buffer from the flash */
1231 static int read_data_from_flash_mem(struct denali_nand_info
*denali
,
1235 uint32_t i
= 0, *buf32
;
1237 /* we assume that len will be a multiple of 4, if not
1238 * it would be nice to know about it ASAP rather than
1239 * have random failures...
1240 * This assumption is based on the fact that this
1241 * function is designed to be used to read flash pages,
1242 * which are typically multiples of 4...
1245 BUG_ON((len
% 4) != 0);
1247 /* transfer the data from the flash */
1248 buf32
= (uint32_t *)buf
;
1249 for (i
= 0; i
< len
/ 4; i
++)
1250 *buf32
++ = ioread32(denali
->flash_mem
+ 0x10);
1251 return i
*4; /* intent is to return the number of bytes read */
1254 /* writes OOB data to the device */
1255 static int write_oob_data(struct mtd_info
*mtd
, uint8_t *buf
, int page
)
1257 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1258 uint32_t irq_status
= 0;
1259 uint32_t irq_mask
= INTR_STATUS0__PROGRAM_COMP
|
1260 INTR_STATUS0__PROGRAM_FAIL
;
1263 denali
->page
= page
;
1265 if (denali_send_pipeline_cmd(denali
, false, false, SPARE_ACCESS
,
1266 DENALI_WRITE
) == PASS
) {
1267 write_data_to_flash_mem(denali
, buf
, mtd
->oobsize
);
1270 spin_lock_irq(&denali
->irq_lock
);
1271 denali
->irq_debug_array
[denali
->idx
++] =
1272 0x80000000 | mtd
->oobsize
;
1274 spin_unlock_irq(&denali
->irq_lock
);
1278 /* wait for operation to complete */
1279 irq_status
= wait_for_irq(denali
, irq_mask
);
1281 if (irq_status
== 0) {
1282 printk(KERN_ERR
"OOB write failed\n");
1286 printk(KERN_ERR
"unable to send pipeline command\n");
1292 /* reads OOB data from the device */
1293 static void read_oob_data(struct mtd_info
*mtd
, uint8_t *buf
, int page
)
1295 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1296 uint32_t irq_mask
= INTR_STATUS0__LOAD_COMP
,
1297 irq_status
= 0, addr
= 0x0, cmd
= 0x0;
1299 denali
->page
= page
;
1302 printk(KERN_INFO
"read_oob %d\n", page
);
1304 if (denali_send_pipeline_cmd(denali
, false, true, SPARE_ACCESS
,
1305 DENALI_READ
) == PASS
) {
1306 read_data_from_flash_mem(denali
, buf
, mtd
->oobsize
);
1308 /* wait for command to be accepted
1309 * can always use status0 bit as the mask is identical for each
1311 irq_status
= wait_for_irq(denali
, irq_mask
);
1313 if (irq_status
== 0)
1314 printk(KERN_ERR
"page on OOB timeout %d\n",
1317 /* We set the device back to MAIN_ACCESS here as I observed
1318 * instability with the controller if you do a block erase
1319 * and the last transaction was a SPARE_ACCESS. Block erase
1320 * is reliable (according to the MTD test infrastructure)
1321 * if you are in MAIN_ACCESS.
1323 addr
= BANK(denali
->flash_bank
) | denali
->page
;
1324 cmd
= MODE_10
| addr
;
1325 index_addr(denali
, (uint32_t)cmd
, MAIN_ACCESS
);
1328 spin_lock_irq(&denali
->irq_lock
);
1329 denali
->irq_debug_array
[denali
->idx
++] =
1330 0x60000000 | mtd
->oobsize
;
1332 spin_unlock_irq(&denali
->irq_lock
);
/* this function examines buffers to see if they contain data that
 * indicate that the buffer is part of an erased region of flash
 * (erased NAND reads back as all 0xFF bytes). */
bool is_erased(uint8_t *buf, int len)
{
	int pos;

	for (pos = 0; pos < len; pos++) {
		if (buf[pos] != 0xFF)
			return false;
	}
	return true;
}
#define ECC_SECTOR_SIZE 512

/* Field extractors for the controller's ECC error registers. */
#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
/* Fixed precedence bug: '&' binds looser than '>>', so the old form
 * computed (x) & (DEVICE_NR >> 8) instead of extracting the device
 * number field and shifting it down. */
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1357 static bool handle_ecc(struct denali_nand_info
*denali
, uint8_t *buf
,
1358 uint8_t *oobbuf
, uint32_t irq_status
)
1360 bool check_erased_page
= false;
1362 if (irq_status
& INTR_STATUS0__ECC_ERR
) {
1363 /* read the ECC errors. we'll ignore them for now */
1364 uint32_t err_address
= 0, err_correction_info
= 0;
1365 uint32_t err_byte
= 0, err_sector
= 0, err_device
= 0;
1366 uint32_t err_correction_value
= 0;
1369 err_address
= ioread32(denali
->flash_reg
+
1371 err_sector
= ECC_SECTOR(err_address
);
1372 err_byte
= ECC_BYTE(err_address
);
1375 err_correction_info
= ioread32(denali
->flash_reg
+
1376 ERR_CORRECTION_INFO
);
1377 err_correction_value
=
1378 ECC_CORRECTION_VALUE(err_correction_info
);
1379 err_device
= ECC_ERR_DEVICE(err_correction_info
);
1381 if (ECC_ERROR_CORRECTABLE(err_correction_info
)) {
1382 /* offset in our buffer is computed as:
1383 sector number * sector size + offset in
1386 int offset
= err_sector
* ECC_SECTOR_SIZE
+
1388 if (offset
< denali
->mtd
.writesize
) {
1389 /* correct the ECC error */
1390 buf
[offset
] ^= err_correction_value
;
1391 denali
->mtd
.ecc_stats
.corrected
++;
1393 /* bummer, couldn't correct the error */
1394 printk(KERN_ERR
"ECC offset invalid\n");
1395 denali
->mtd
.ecc_stats
.failed
++;
1398 /* if the error is not correctable, need to
1399 * look at the page to see if it is an erased
1400 * page. if so, then it's not a real ECC error
1402 check_erased_page
= true;
1406 printk(KERN_INFO
"Detected ECC error in page %d:"
1407 " err_addr = 0x%08x, info to fix is"
1408 " 0x%08x\n", denali
->page
, err_address
,
1409 err_correction_info
);
1411 } while (!ECC_LAST_ERR(err_correction_info
));
1413 return check_erased_page
;
1416 /* programs the controller to either enable/disable DMA transfers */
1417 static void denali_enable_dma(struct denali_nand_info
*denali
, bool en
)
1419 uint32_t reg_val
= 0x0;
1422 reg_val
= DMA_ENABLE__FLAG
;
1424 denali_write32(reg_val
, denali
->flash_reg
+ DMA_ENABLE
);
1425 ioread32(denali
->flash_reg
+ DMA_ENABLE
);
1428 /* setups the HW to perform the data DMA */
1429 static void denali_setup_dma(struct denali_nand_info
*denali
, int op
)
1431 uint32_t mode
= 0x0;
1432 const int page_count
= 1;
1433 dma_addr_t addr
= denali
->buf
.dma_buf
;
1435 mode
= MODE_10
| BANK(denali
->flash_bank
);
1437 /* DMA is a four step process */
1439 /* 1. setup transfer type and # of pages */
1440 index_addr(denali
, mode
| denali
->page
, 0x2000 | op
| page_count
);
1442 /* 2. set memory high address bits 23:8 */
1443 index_addr(denali
, mode
| ((uint16_t)(addr
>> 16) << 8), 0x2200);
1445 /* 3. set memory low address bits 23:8 */
1446 index_addr(denali
, mode
| ((uint16_t)addr
<< 8), 0x2300);
1448 /* 4. interrupt when complete, burst len = 64 bytes*/
1449 index_addr(denali
, mode
| 0x14000, 0x2400);
1452 /* writes a page. user specifies type, and this function handles the
1453 configuration details. */
1454 static void write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1455 const uint8_t *buf
, bool raw_xfer
)
1457 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1458 struct pci_dev
*pci_dev
= denali
->dev
;
1460 dma_addr_t addr
= denali
->buf
.dma_buf
;
1461 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1463 uint32_t irq_status
= 0;
1464 uint32_t irq_mask
= INTR_STATUS0__DMA_CMD_COMP
|
1465 INTR_STATUS0__PROGRAM_FAIL
;
1467 /* if it is a raw xfer, we want to disable ecc, and send
1469 * !raw_xfer - enable ecc
1470 * raw_xfer - transfer spare
1472 setup_ecc_for_xfer(denali
, !raw_xfer
, raw_xfer
);
1474 /* copy buffer into DMA buffer */
1475 memcpy(denali
->buf
.buf
, buf
, mtd
->writesize
);
1478 /* transfer the data to the spare area */
1479 memcpy(denali
->buf
.buf
+ mtd
->writesize
,
1484 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_TODEVICE
);
1486 clear_interrupts(denali
);
1487 denali_enable_dma(denali
, true);
1489 denali_setup_dma(denali
, DENALI_WRITE
);
1491 /* wait for operation to complete */
1492 irq_status
= wait_for_irq(denali
, irq_mask
);
1494 if (irq_status
== 0) {
1495 printk(KERN_ERR
"timeout on write_page"
1496 " (type = %d)\n", raw_xfer
);
1498 (irq_status
& INTR_STATUS0__PROGRAM_FAIL
) ?
1499 NAND_STATUS_FAIL
: PASS
;
1502 denali_enable_dma(denali
, false);
1503 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_TODEVICE
);
1506 /* NAND core entry points */
1508 /* this is the callback that the NAND core calls to write a page. Since
1509 writing a page with ECC or without is similar, all the work is done
1510 by write_page above. */
1511 static void denali_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1514 /* for regular page writes, we let HW handle all the ECC
1515 * data written to the device. */
1516 write_page(mtd
, chip
, buf
, false);
1519 /* This is the callback that the NAND core calls to write a page without ECC.
1520 raw access is similiar to ECC page writes, so all the work is done in the
1521 write_page() function above.
1523 static void denali_write_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1526 /* for raw page writes, we want to disable ECC and simply write
1527 whatever data is in the buffer. */
1528 write_page(mtd
, chip
, buf
, true);
1531 static int denali_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1534 return write_oob_data(mtd
, chip
->oob_poi
, page
);
1537 static int denali_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1538 int page
, int sndcmd
)
1540 read_oob_data(mtd
, chip
->oob_poi
, page
);
1542 return 0; /* notify NAND core to send command to
1546 static int denali_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1547 uint8_t *buf
, int page
)
1549 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1550 struct pci_dev
*pci_dev
= denali
->dev
;
1552 dma_addr_t addr
= denali
->buf
.dma_buf
;
1553 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1555 uint32_t irq_status
= 0;
1556 uint32_t irq_mask
= INTR_STATUS0__ECC_TRANSACTION_DONE
|
1557 INTR_STATUS0__ECC_ERR
;
1558 bool check_erased_page
= false;
1560 setup_ecc_for_xfer(denali
, true, false);
1562 denali_enable_dma(denali
, true);
1563 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1565 clear_interrupts(denali
);
1566 denali_setup_dma(denali
, DENALI_READ
);
1568 /* wait for operation to complete */
1569 irq_status
= wait_for_irq(denali
, irq_mask
);
1571 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1573 memcpy(buf
, denali
->buf
.buf
, mtd
->writesize
);
1575 check_erased_page
= handle_ecc(denali
, buf
, chip
->oob_poi
, irq_status
);
1576 denali_enable_dma(denali
, false);
1578 if (check_erased_page
) {
1579 read_oob_data(&denali
->mtd
, chip
->oob_poi
, denali
->page
);
1581 /* check ECC failures that may have occurred on erased pages */
1582 if (check_erased_page
) {
1583 if (!is_erased(buf
, denali
->mtd
.writesize
))
1584 denali
->mtd
.ecc_stats
.failed
++;
1585 if (!is_erased(buf
, denali
->mtd
.oobsize
))
1586 denali
->mtd
.ecc_stats
.failed
++;
1592 static int denali_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1593 uint8_t *buf
, int page
)
1595 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1596 struct pci_dev
*pci_dev
= denali
->dev
;
1598 dma_addr_t addr
= denali
->buf
.dma_buf
;
1599 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1601 uint32_t irq_status
= 0;
1602 uint32_t irq_mask
= INTR_STATUS0__DMA_CMD_COMP
;
1604 setup_ecc_for_xfer(denali
, false, true);
1605 denali_enable_dma(denali
, true);
1607 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1609 clear_interrupts(denali
);
1610 denali_setup_dma(denali
, DENALI_READ
);
1612 /* wait for operation to complete */
1613 irq_status
= wait_for_irq(denali
, irq_mask
);
1615 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1617 denali_enable_dma(denali
, false);
1619 memcpy(buf
, denali
->buf
.buf
, mtd
->writesize
);
1620 memcpy(chip
->oob_poi
, denali
->buf
.buf
+ mtd
->writesize
, mtd
->oobsize
);
1625 static uint8_t denali_read_byte(struct mtd_info
*mtd
)
1627 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1628 uint8_t result
= 0xff;
1630 if (denali
->buf
.head
< denali
->buf
.tail
)
1631 result
= denali
->buf
.buf
[denali
->buf
.head
++];
1634 printk(KERN_INFO
"read byte -> 0x%02x\n", result
);
1639 static void denali_select_chip(struct mtd_info
*mtd
, int chip
)
1641 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1643 printk(KERN_INFO
"denali select chip %d\n", chip
);
1645 spin_lock_irq(&denali
->irq_lock
);
1646 denali
->flash_bank
= chip
;
1647 spin_unlock_irq(&denali
->irq_lock
);
1650 static int denali_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*chip
)
1652 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1653 int status
= denali
->status
;
1657 printk(KERN_INFO
"waitfunc %d\n", status
);
1662 static void denali_erase(struct mtd_info
*mtd
, int page
)
1664 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1666 uint32_t cmd
= 0x0, irq_status
= 0;
1669 printk(KERN_INFO
"erase page: %d\n", page
);
1671 /* clear interrupts */
1672 clear_interrupts(denali
);
1674 /* setup page read request for access type */
1675 cmd
= MODE_10
| BANK(denali
->flash_bank
) | page
;
1676 index_addr(denali
, (uint32_t)cmd
, 0x1);
1678 /* wait for erase to complete or failure to occur */
1679 irq_status
= wait_for_irq(denali
, INTR_STATUS0__ERASE_COMP
|
1680 INTR_STATUS0__ERASE_FAIL
);
1682 denali
->status
= (irq_status
& INTR_STATUS0__ERASE_FAIL
) ?
1683 NAND_STATUS_FAIL
: PASS
;
1686 static void denali_cmdfunc(struct mtd_info
*mtd
, unsigned int cmd
, int col
,
1689 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1694 printk(KERN_INFO
"cmdfunc: 0x%x %d %d\n", cmd
, col
, page
);
1697 case NAND_CMD_PAGEPROG
:
1699 case NAND_CMD_STATUS
:
1700 read_status(denali
);
1702 case NAND_CMD_READID
:
1704 /*sometimes ManufactureId read from register is not right
1705 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
1706 * So here we send READID cmd to NAND insteand
1708 addr
= (uint32_t)MODE_11
| BANK(denali
->flash_bank
);
1709 index_addr(denali
, (uint32_t)addr
| 0, 0x90);
1710 index_addr(denali
, (uint32_t)addr
| 1, 0);
1711 for (i
= 0; i
< 5; i
++) {
1712 index_addr_read_data(denali
,
1715 write_byte_to_buf(denali
, id
);
1718 case NAND_CMD_READ0
:
1719 case NAND_CMD_SEQIN
:
1720 denali
->page
= page
;
1722 case NAND_CMD_RESET
:
1725 case NAND_CMD_READOOB
:
1726 /* TODO: Read OOB data */
1729 printk(KERN_ERR
": unsupported command"
1730 " received 0x%x\n", cmd
);
1735 /* stubs for ECC functions not used by the NAND core */
1736 static int denali_ecc_calculate(struct mtd_info
*mtd
, const uint8_t *data
,
1739 printk(KERN_ERR
"denali_ecc_calculate called unexpectedly\n");
1744 static int denali_ecc_correct(struct mtd_info
*mtd
, uint8_t *data
,
1745 uint8_t *read_ecc
, uint8_t *calc_ecc
)
1747 printk(KERN_ERR
"denali_ecc_correct called unexpectedly\n");
1752 static void denali_ecc_hwctl(struct mtd_info
*mtd
, int mode
)
1754 printk(KERN_ERR
"denali_ecc_hwctl called unexpectedly\n");
1757 /* end NAND core entry points */
1759 /* Initialization code to bring the device up to a known good state */
1760 static void denali_hw_init(struct denali_nand_info
*denali
)
1762 denali_irq_init(denali
);
1763 denali_nand_reset(denali
);
1764 denali_write32(0x0F, denali
->flash_reg
+ RB_PIN_ENABLED
);
1765 denali_write32(CHIP_EN_DONT_CARE__FLAG
,
1766 denali
->flash_reg
+ CHIP_ENABLE_DONT_CARE
);
1768 denali_write32(0x0, denali
->flash_reg
+ SPARE_AREA_SKIP_BYTES
);
1769 denali_write32(0xffff, denali
->flash_reg
+ SPARE_AREA_MARKER
);
1771 /* Should set value for these registers when init */
1772 denali_write32(0, denali
->flash_reg
+ TWO_ROW_ADDR_CYCLES
);
1773 denali_write32(1, denali
->flash_reg
+ ECC_ENABLE
);
1776 /* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
1777 #define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
1778 static struct nand_ecclayout nand_oob_slc
= {
1780 .eccpos
= { 0, 1, 2, 3 }, /* not used */
1783 .offset
= ECC_BYTES_SLC
,
1784 .length
= 64 - ECC_BYTES_SLC
1789 #define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
1790 static struct nand_ecclayout nand_oob_mlc_14bit
= {
1792 .eccpos
= { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
1795 .offset
= ECC_BYTES_MLC
,
1796 .length
= 64 - ECC_BYTES_MLC
1801 static uint8_t bbt_pattern
[] = {'B', 'b', 't', '0' };
1802 static uint8_t mirror_pattern
[] = {'1', 't', 'b', 'B' };
1804 static struct nand_bbt_descr bbt_main_descr
= {
1805 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
1806 | NAND_BBT_2BIT
| NAND_BBT_VERSION
| NAND_BBT_PERCHIP
,
1811 .pattern
= bbt_pattern
,
1814 static struct nand_bbt_descr bbt_mirror_descr
= {
1815 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
1816 | NAND_BBT_2BIT
| NAND_BBT_VERSION
| NAND_BBT_PERCHIP
,
1821 .pattern
= mirror_pattern
,
1824 /* initalize driver data structures */
1825 void denali_drv_init(struct denali_nand_info
*denali
)
1829 /* setup interrupt handler */
1830 /* the completion object will be used to notify
1831 * the callee that the interrupt is done */
1832 init_completion(&denali
->complete
);
1834 /* the spinlock will be used to synchronize the ISR
1835 * with any element that might be access shared
1836 * data (interrupt status) */
1837 spin_lock_init(&denali
->irq_lock
);
1839 /* indicate that MTD has not selected a valid bank yet */
1840 denali
->flash_bank
= CHIP_SELECT_INVALID
;
1842 /* initialize our irq_status variable to indicate no interrupts */
1843 denali
->irq_status
= 0;
1846 /* driver entry point */
1847 static int denali_pci_probe(struct pci_dev
*dev
, const struct pci_device_id
*id
)
1850 resource_size_t csr_base
, mem_base
;
1851 unsigned long csr_len
, mem_len
;
1852 struct denali_nand_info
*denali
;
1854 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1855 __FILE__
, __LINE__
, __func__
);
1857 denali
= kzalloc(sizeof(*denali
), GFP_KERNEL
);
1861 ret
= pci_enable_device(dev
);
1863 printk(KERN_ERR
"Spectra: pci_enable_device failed.\n");
1867 if (id
->driver_data
== INTEL_CE4100
) {
1868 /* Due to a silicon limitation, we can only support
1869 * ONFI timing mode 1 and below.
1871 if (onfi_timing_mode
< -1 || onfi_timing_mode
> 1) {
1872 printk(KERN_ERR
"Intel CE4100 only supports"
1873 " ONFI timing mode 1 or below\n");
1877 denali
->platform
= INTEL_CE4100
;
1878 mem_base
= pci_resource_start(dev
, 0);
1879 mem_len
= pci_resource_len(dev
, 1);
1880 csr_base
= pci_resource_start(dev
, 1);
1881 csr_len
= pci_resource_len(dev
, 1);
1883 denali
->platform
= INTEL_MRST
;
1884 csr_base
= pci_resource_start(dev
, 0);
1885 csr_len
= pci_resource_start(dev
, 0);
1886 mem_base
= pci_resource_start(dev
, 1);
1887 mem_len
= pci_resource_len(dev
, 1);
1889 mem_base
= csr_base
+ csr_len
;
1891 nand_dbg_print(NAND_DBG_WARN
,
1892 "Spectra: No second"
1893 " BAR for PCI device;"
1894 " assuming %08Lx\n",
1895 (uint64_t)csr_base
);
1899 /* Is 32-bit DMA supported? */
1900 ret
= pci_set_dma_mask(dev
, DMA_BIT_MASK(32));
1903 printk(KERN_ERR
"Spectra: no usable DMA configuration\n");
1906 denali
->buf
.dma_buf
=
1907 pci_map_single(dev
, denali
->buf
.buf
,
1909 PCI_DMA_BIDIRECTIONAL
);
1911 if (pci_dma_mapping_error(dev
, denali
->buf
.dma_buf
)) {
1912 printk(KERN_ERR
"Spectra: failed to map DMA buffer\n");
1916 pci_set_master(dev
);
1919 ret
= pci_request_regions(dev
, DENALI_NAND_NAME
);
1921 printk(KERN_ERR
"Spectra: Unable to request memory regions\n");
1922 goto failed_req_csr
;
1925 denali
->flash_reg
= ioremap_nocache(csr_base
, csr_len
);
1926 if (!denali
->flash_reg
) {
1927 printk(KERN_ERR
"Spectra: Unable to remap memory region\n");
1929 goto failed_remap_csr
;
1931 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1932 (uint64_t)csr_base
, denali
->flash_reg
, csr_len
);
1934 denali
->flash_mem
= ioremap_nocache(mem_base
, mem_len
);
1935 if (!denali
->flash_mem
) {
1936 printk(KERN_ERR
"Spectra: ioremap_nocache failed!");
1937 iounmap(denali
->flash_reg
);
1939 goto failed_remap_csr
;
1942 nand_dbg_print(NAND_DBG_WARN
,
1943 "Spectra: Remapped flash base address: "
1945 denali
->flash_mem
, csr_len
);
1947 denali_hw_init(denali
);
1948 denali_drv_init(denali
);
1950 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra: IRQ %d\n", dev
->irq
);
1951 if (request_irq(dev
->irq
, denali_isr
, IRQF_SHARED
,
1952 DENALI_NAND_NAME
, denali
)) {
1953 printk(KERN_ERR
"Spectra: Unable to allocate IRQ\n");
1955 goto failed_request_irq
;
1958 /* now that our ISR is registered, we can enable interrupts */
1959 denali_set_intr_modes(denali
, true);
1961 pci_set_drvdata(dev
, denali
);
1963 denali_nand_timing_set(denali
);
1965 /* MTD supported page sizes vary by kernel. We validate our
1966 * kernel supports the device here.
1968 if (denali
->dev_info
.wPageSize
> NAND_MAX_PAGESIZE
+ NAND_MAX_OOBSIZE
) {
1970 printk(KERN_ERR
"Spectra: device size not supported by this "
1975 nand_dbg_print(NAND_DBG_DEBUG
, "Dump timing register values:"
1976 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
1977 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1978 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1979 ioread32(denali
->flash_reg
+ ACC_CLKS
),
1980 ioread32(denali
->flash_reg
+ RE_2_WE
),
1981 ioread32(denali
->flash_reg
+ WE_2_RE
),
1982 ioread32(denali
->flash_reg
+ ADDR_2_DATA
),
1983 ioread32(denali
->flash_reg
+ RDWR_EN_LO_CNT
),
1984 ioread32(denali
->flash_reg
+ RDWR_EN_HI_CNT
),
1985 ioread32(denali
->flash_reg
+ CS_SETUP_CNT
));
1987 denali
->mtd
.name
= "Denali NAND";
1988 denali
->mtd
.owner
= THIS_MODULE
;
1989 denali
->mtd
.priv
= &denali
->nand
;
1991 /* register the driver with the NAND core subsystem */
1992 denali
->nand
.select_chip
= denali_select_chip
;
1993 denali
->nand
.cmdfunc
= denali_cmdfunc
;
1994 denali
->nand
.read_byte
= denali_read_byte
;
1995 denali
->nand
.waitfunc
= denali_waitfunc
;
1997 /* scan for NAND devices attached to the controller
1998 * this is the first stage in a two step process to register
1999 * with the nand subsystem */
2000 if (nand_scan_ident(&denali
->mtd
, LLD_MAX_FLASH_BANKS
, NULL
)) {
2005 /* second stage of the NAND scan
2006 * this stage requires information regarding ECC and
2007 * bad block management. */
2009 /* Bad block management */
2010 denali
->nand
.bbt_td
= &bbt_main_descr
;
2011 denali
->nand
.bbt_md
= &bbt_mirror_descr
;
2013 /* skip the scan for now until we have OOB read and write support */
2014 denali
->nand
.options
|= NAND_USE_FLASH_BBT
| NAND_SKIP_BBTSCAN
;
2015 denali
->nand
.ecc
.mode
= NAND_ECC_HW_SYNDROME
;
2017 if (denali
->dev_info
.MLCDevice
) {
2018 denali
->nand
.ecc
.layout
= &nand_oob_mlc_14bit
;
2019 denali
->nand
.ecc
.bytes
= ECC_BYTES_MLC
;
2021 denali
->nand
.ecc
.layout
= &nand_oob_slc
;
2022 denali
->nand
.ecc
.bytes
= ECC_BYTES_SLC
;
2025 /* These functions are required by the NAND core framework, otherwise,
2026 * the NAND core will assert. However, we don't need them, so we'll stub
2028 denali
->nand
.ecc
.calculate
= denali_ecc_calculate
;
2029 denali
->nand
.ecc
.correct
= denali_ecc_correct
;
2030 denali
->nand
.ecc
.hwctl
= denali_ecc_hwctl
;
2032 /* override the default read operations */
2033 denali
->nand
.ecc
.size
= denali
->mtd
.writesize
;
2034 denali
->nand
.ecc
.read_page
= denali_read_page
;
2035 denali
->nand
.ecc
.read_page_raw
= denali_read_page_raw
;
2036 denali
->nand
.ecc
.write_page
= denali_write_page
;
2037 denali
->nand
.ecc
.write_page_raw
= denali_write_page_raw
;
2038 denali
->nand
.ecc
.read_oob
= denali_read_oob
;
2039 denali
->nand
.ecc
.write_oob
= denali_write_oob
;
2040 denali
->nand
.erase_cmd
= denali_erase
;
2042 if (nand_scan_tail(&denali
->mtd
)) {
2047 ret
= add_mtd_device(&denali
->mtd
);
2049 printk(KERN_ERR
"Spectra: Failed to register"
2050 " MTD device: %d\n", ret
);
2056 denali_irq_cleanup(dev
->irq
, denali
);
2058 iounmap(denali
->flash_reg
);
2059 iounmap(denali
->flash_mem
);
2061 pci_release_regions(dev
);
2063 pci_unmap_single(dev
, denali
->buf
.dma_buf
, DENALI_BUF_SIZE
,
2064 PCI_DMA_BIDIRECTIONAL
);
2070 /* driver exit point */
2071 static void denali_pci_remove(struct pci_dev
*dev
)
2073 struct denali_nand_info
*denali
= pci_get_drvdata(dev
);
2075 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
2076 __FILE__
, __LINE__
, __func__
);
2078 nand_release(&denali
->mtd
);
2079 del_mtd_device(&denali
->mtd
);
2081 denali_irq_cleanup(dev
->irq
, denali
);
2083 iounmap(denali
->flash_reg
);
2084 iounmap(denali
->flash_mem
);
2085 pci_release_regions(dev
);
2086 pci_disable_device(dev
);
2087 pci_unmap_single(dev
, denali
->buf
.dma_buf
, DENALI_BUF_SIZE
,
2088 PCI_DMA_BIDIRECTIONAL
);
2089 pci_set_drvdata(dev
, NULL
);
2093 MODULE_DEVICE_TABLE(pci
, denali_pci_ids
);
2095 static struct pci_driver denali_pci_driver
= {
2096 .name
= DENALI_NAND_NAME
,
2097 .id_table
= denali_pci_ids
,
2098 .probe
= denali_pci_probe
,
2099 .remove
= denali_pci_remove
,
2102 static int __devinit
denali_init(void)
2104 printk(KERN_INFO
"Spectra MTD driver built on %s @ %s\n",
2105 __DATE__
, __TIME__
);
2106 return pci_register_driver(&denali_pci_driver
);
2110 static void __devexit
denali_exit(void)
2112 pci_unregister_driver(&denali_pci_driver
);
2115 module_init(denali_init
);
2116 module_exit(denali_exit
);