GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / spectra / lld_nand.c
blob13563c2f7090dc7eda45e64c324fd8ae850dbe58
1 /*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include "lld.h"
21 #include "lld_nand.h"
22 #include "lld_cdma.h"
24 #include "spectraswconfig.h"
25 #include "flash.h"
26 #include "ffsdefs.h"
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/wait.h>
31 #include <linux/mutex.h>
33 #include "nand_regs.h"
#define SPECTRA_NAND_NAME    "nd"

/* Integer ceiling division: CEIL_DIV(7, 4) == 2.  Both args evaluated twice. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
/* Upper bound on pages moved by a single read/write transaction. */
#define MAX_PAGES_PER_RW    128

/* Values tracking which interrupt-driven operation is in flight. */
#define INT_IDLE_STATE                 0
#define INT_READ_PAGE_MAIN    0x01
#define INT_WRITE_PAGE_MAIN    0x02
#define INT_PIPELINE_READ_AHEAD    0x04
#define INT_PIPELINE_WRITE_AHEAD    0x08
#define INT_MULTI_PLANE_READ    0x10
#define INT_MULTI_PLANE_WRITE    0x11
48 static u32 enable_ecc;
50 struct mrst_nand_info info;
52 int totalUsedBanks;
53 u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
55 void __iomem *FlashReg;
56 void __iomem *FlashMem;
58 u16 conf_parameters[] = {
59 0x0000,
60 0x0000,
61 0x01F4,
62 0x01F4,
63 0x01F4,
64 0x01F4,
65 0x0000,
66 0x0000,
67 0x0001,
68 0x0000,
69 0x0000,
70 0x0000,
71 0x0000,
72 0x0040,
73 0x0001,
74 0x000A,
75 0x000A,
76 0x000A,
77 0x0000,
78 0x0000,
79 0x0005,
80 0x0012,
81 0x000C
84 u16 NAND_Get_Bad_Block(u32 block)
86 u32 status = PASS;
87 u32 flag_bytes = 0;
88 u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
89 u32 page, i;
90 u8 *pReadSpareBuf = buf_get_bad_block;
92 if (enable_ecc)
93 flag_bytes = DeviceInfo.wNumPageSpareFlag;
95 for (page = 0; page < 2; page++) {
96 status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
97 if (status != PASS)
98 return READ_ERROR;
99 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
100 if (pReadSpareBuf[i] != 0xff)
101 return DEFECTIVE_BLOCK;
104 for (page = 1; page < 3; page++) {
105 status = NAND_Read_Page_Spare(pReadSpareBuf, block,
106 DeviceInfo.wPagesPerBlock - page , 1);
107 if (status != PASS)
108 return READ_ERROR;
109 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
110 if (pReadSpareBuf[i] != 0xff)
111 return DEFECTIVE_BLOCK;
114 return GOOD_BLOCK;
118 u16 NAND_Flash_Reset(void)
120 u32 i;
121 u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
122 INTR_STATUS1__RST_COMP,
123 INTR_STATUS2__RST_COMP,
124 INTR_STATUS3__RST_COMP};
125 u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
126 INTR_STATUS1__TIME_OUT,
127 INTR_STATUS2__TIME_OUT,
128 INTR_STATUS3__TIME_OUT};
129 u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
130 INTR_STATUS2, INTR_STATUS3};
131 u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
132 DEVICE_RESET__BANK1,
133 DEVICE_RESET__BANK2,
134 DEVICE_RESET__BANK3};
136 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
137 __FILE__, __LINE__, __func__);
139 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
140 iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
141 FlashReg + intr_status[i]);
143 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
144 iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
145 while (!(ioread32(FlashReg + intr_status[i]) &
146 (intr_status_rst_comp[i] | intr_status_time_out[i])))
148 if (ioread32(FlashReg + intr_status[i]) &
149 intr_status_time_out[i])
150 nand_dbg_print(NAND_DBG_WARN,
151 "NAND Reset operation timed out on bank %d\n", i);
154 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
155 iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
156 FlashReg + intr_status[i]);
158 return PASS;
161 static void NAND_ONFi_Timing_Mode(u16 mode)
163 u16 Trea[6] = {40, 30, 25, 20, 20, 16};
164 u16 Trp[6] = {50, 25, 17, 15, 12, 10};
165 u16 Treh[6] = {30, 15, 15, 10, 10, 7};
166 u16 Trc[6] = {100, 50, 35, 30, 25, 20};
167 u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
168 u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
169 u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
170 u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
171 u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
172 u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
173 u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
174 u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
176 u16 TclsRising = 1;
177 u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
178 u16 dv_window = 0;
179 u16 en_lo, en_hi;
180 u16 acc_clks;
181 u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
183 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
184 __FILE__, __LINE__, __func__);
186 en_lo = CEIL_DIV(Trp[mode], CLK_X);
187 en_hi = CEIL_DIV(Treh[mode], CLK_X);
189 #if ONFI_BLOOM_TIME
190 if ((en_hi * CLK_X) < (Treh[mode] + 2))
191 en_hi++;
192 #endif
194 if ((en_lo + en_hi) * CLK_X < Trc[mode])
195 en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
197 if ((en_lo + en_hi) < CLK_MULTI)
198 en_lo += CLK_MULTI - en_lo - en_hi;
200 while (dv_window < 8) {
201 data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
203 data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
205 data_invalid =
206 data_invalid_rhoh <
207 data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
209 dv_window = data_invalid - Trea[mode];
211 if (dv_window < 8)
212 en_lo++;
215 acc_clks = CEIL_DIV(Trea[mode], CLK_X);
217 while (((acc_clks * CLK_X) - Trea[mode]) < 3)
218 acc_clks++;
220 if ((data_invalid - acc_clks * CLK_X) < 2)
221 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
222 __FILE__, __LINE__);
224 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
225 re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
226 re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
227 we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
228 cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
229 if (!TclsRising)
230 cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
231 if (cs_cnt == 0)
232 cs_cnt = 1;
234 if (Tcea[mode]) {
235 while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
236 cs_cnt++;
239 #if MODE5_WORKAROUND
240 if (mode == 5)
241 acc_clks = 5;
242 #endif
244 /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
245 if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
246 (ioread32(FlashReg + DEVICE_ID) == 0x88))
247 acc_clks = 6;
249 iowrite32(acc_clks, FlashReg + ACC_CLKS);
250 iowrite32(re_2_we, FlashReg + RE_2_WE);
251 iowrite32(re_2_re, FlashReg + RE_2_RE);
252 iowrite32(we_2_re, FlashReg + WE_2_RE);
253 iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
254 iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
255 iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
256 iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
259 static void index_addr(u32 address, u32 data)
261 iowrite32(address, FlashMem);
262 iowrite32(data, FlashMem + 0x10);
265 static void index_addr_read_data(u32 address, u32 *pdata)
267 iowrite32(address, FlashMem);
268 *pdata = ioread32(FlashMem + 0x10);
271 static void set_ecc_config(void)
273 #if SUPPORT_8BITECC
274 if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
275 (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
276 iowrite32(8, FlashReg + ECC_CORRECTION);
277 #endif
279 if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
280 == 1) {
281 DeviceInfo.wECCBytesPerSector = 4;
282 DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
283 DeviceInfo.wNumPageSpareFlag =
284 DeviceInfo.wPageSpareSize -
285 DeviceInfo.wPageDataSize /
286 (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
287 DeviceInfo.wECCBytesPerSector
288 - DeviceInfo.wSpareSkipBytes;
289 } else {
290 DeviceInfo.wECCBytesPerSector =
291 (ioread32(FlashReg + ECC_CORRECTION) &
292 ECC_CORRECTION__VALUE) * 13 / 8;
293 if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
294 DeviceInfo.wECCBytesPerSector += 2;
295 else
296 DeviceInfo.wECCBytesPerSector += 1;
298 DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
299 DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
300 DeviceInfo.wPageDataSize /
301 (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
302 DeviceInfo.wECCBytesPerSector
303 - DeviceInfo.wSpareSkipBytes;
307 static u16 get_onfi_nand_para(void)
309 int i;
310 u16 blks_lun_l, blks_lun_h, n_of_luns;
311 u32 blockperlun, id;
313 iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
315 while (!((ioread32(FlashReg + INTR_STATUS0) &
316 INTR_STATUS0__RST_COMP) |
317 (ioread32(FlashReg + INTR_STATUS0) &
318 INTR_STATUS0__TIME_OUT)))
321 if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
322 iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
323 while (!((ioread32(FlashReg + INTR_STATUS1) &
324 INTR_STATUS1__RST_COMP) |
325 (ioread32(FlashReg + INTR_STATUS1) &
326 INTR_STATUS1__TIME_OUT)))
329 if (ioread32(FlashReg + INTR_STATUS1) &
330 INTR_STATUS1__RST_COMP) {
331 iowrite32(DEVICE_RESET__BANK2,
332 FlashReg + DEVICE_RESET);
333 while (!((ioread32(FlashReg + INTR_STATUS2) &
334 INTR_STATUS2__RST_COMP) |
335 (ioread32(FlashReg + INTR_STATUS2) &
336 INTR_STATUS2__TIME_OUT)))
339 if (ioread32(FlashReg + INTR_STATUS2) &
340 INTR_STATUS2__RST_COMP) {
341 iowrite32(DEVICE_RESET__BANK3,
342 FlashReg + DEVICE_RESET);
343 while (!((ioread32(FlashReg + INTR_STATUS3) &
344 INTR_STATUS3__RST_COMP) |
345 (ioread32(FlashReg + INTR_STATUS3) &
346 INTR_STATUS3__TIME_OUT)))
348 } else {
349 printk(KERN_ERR "Getting a time out for bank 2!\n");
351 } else {
352 printk(KERN_ERR "Getting a time out for bank 1!\n");
356 iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
357 iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
358 iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
359 iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
361 DeviceInfo.wONFIDevFeatures =
362 ioread32(FlashReg + ONFI_DEVICE_FEATURES);
363 DeviceInfo.wONFIOptCommands =
364 ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
365 DeviceInfo.wONFITimingMode =
366 ioread32(FlashReg + ONFI_TIMING_MODE);
367 DeviceInfo.wONFIPgmCacheTimingMode =
368 ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
370 n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
371 ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
372 blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
373 blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
375 blockperlun = (blks_lun_h << 16) | blks_lun_l;
377 DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
379 if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
380 ONFI_TIMING_MODE__VALUE))
381 return FAIL;
383 for (i = 5; i > 0; i--) {
384 if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
385 break;
388 NAND_ONFi_Timing_Mode(i);
390 index_addr(MODE_11 | 0, 0x90);
391 index_addr(MODE_11 | 1, 0);
393 for (i = 0; i < 3; i++)
394 index_addr_read_data(MODE_11 | 2, &id);
396 nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
398 DeviceInfo.MLCDevice = id & 0x0C;
400 /* By now, all the ONFI devices we know support the page cache */
401 /* rw feature. So here we enable the pipeline_rw_ahead feature */
402 /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
403 /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
405 return PASS;
408 static void get_samsung_nand_para(void)
410 u8 no_of_planes;
411 u32 blk_size;
412 u64 plane_size, capacity;
413 u32 id_bytes[5];
414 int i;
416 index_addr((u32)(MODE_11 | 0), 0x90);
417 index_addr((u32)(MODE_11 | 1), 0);
418 for (i = 0; i < 5; i++)
419 index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
421 nand_dbg_print(NAND_DBG_DEBUG,
422 "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
423 id_bytes[0], id_bytes[1], id_bytes[2],
424 id_bytes[3], id_bytes[4]);
426 if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
427 /* Set timing register values according to datasheet */
428 iowrite32(5, FlashReg + ACC_CLKS);
429 iowrite32(20, FlashReg + RE_2_WE);
430 iowrite32(12, FlashReg + WE_2_RE);
431 iowrite32(14, FlashReg + ADDR_2_DATA);
432 iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
433 iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
434 iowrite32(2, FlashReg + CS_SETUP_CNT);
437 no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
438 plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
439 blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
440 capacity = (u64)128 * plane_size * no_of_planes;
442 DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
445 static void get_toshiba_nand_para(void)
447 void __iomem *scratch_reg;
448 u32 tmp;
450 /* spare area size for some kind of Toshiba NAND device */
451 if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
452 (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
453 iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
454 tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
455 ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
456 iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
457 #if SUPPORT_15BITECC
458 iowrite32(15, FlashReg + ECC_CORRECTION);
459 #elif SUPPORT_8BITECC
460 iowrite32(8, FlashReg + ECC_CORRECTION);
461 #endif
464 /* As Toshiba NAND can not provide it's block number, */
465 /* so here we need user to provide the correct block */
466 /* number in a scratch register before the Linux NAND */
467 /* driver is loaded. If no valid value found in the scratch */
468 /* register, then we use default block number value */
469 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
470 if (!scratch_reg) {
471 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
472 __FILE__, __LINE__);
473 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
474 } else {
475 nand_dbg_print(NAND_DBG_WARN,
476 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
477 DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
478 if (DeviceInfo.wTotalBlocks < 512)
479 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
480 iounmap(scratch_reg);
484 static void get_hynix_nand_para(void)
486 void __iomem *scratch_reg;
487 u32 main_size, spare_size;
489 switch (DeviceInfo.wDeviceID) {
490 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
491 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
492 iowrite32(128, FlashReg + PAGES_PER_BLOCK);
493 iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
494 iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
495 main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
496 spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
497 iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
498 iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
499 iowrite32(0, FlashReg + DEVICE_WIDTH);
500 #if SUPPORT_15BITECC
501 iowrite32(15, FlashReg + ECC_CORRECTION);
502 #elif SUPPORT_8BITECC
503 iowrite32(8, FlashReg + ECC_CORRECTION);
504 #endif
505 DeviceInfo.MLCDevice = 1;
506 break;
507 default:
508 nand_dbg_print(NAND_DBG_WARN,
509 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
510 "Will use default parameter values instead.\n",
511 DeviceInfo.wDeviceID);
514 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
515 if (!scratch_reg) {
516 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
517 __FILE__, __LINE__);
518 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
519 } else {
520 nand_dbg_print(NAND_DBG_WARN,
521 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
522 DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
523 if (DeviceInfo.wTotalBlocks < 512)
524 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
525 iounmap(scratch_reg);
529 static void find_valid_banks(void)
531 u32 id[LLD_MAX_FLASH_BANKS];
532 int i;
534 totalUsedBanks = 0;
535 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
536 index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
537 index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
538 index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
540 nand_dbg_print(NAND_DBG_DEBUG,
541 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
543 if (i == 0) {
544 if (id[i] & 0x0ff)
545 GLOB_valid_banks[i] = 1;
546 } else {
547 if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
548 GLOB_valid_banks[i] = 1;
551 totalUsedBanks += GLOB_valid_banks[i];
554 nand_dbg_print(NAND_DBG_DEBUG,
555 "totalUsedBanks: %d\n", totalUsedBanks);
558 static void detect_partition_feature(void)
560 if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
561 if ((ioread32(FlashReg + PERM_SRC_ID_1) &
562 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
563 DeviceInfo.wSpectraStartBlock =
564 ((ioread32(FlashReg + MIN_MAX_BANK_1) &
565 MIN_MAX_BANK_1__MIN_VALUE) *
566 DeviceInfo.wTotalBlocks)
568 (ioread32(FlashReg + MIN_BLK_ADDR_1) &
569 MIN_BLK_ADDR_1__VALUE);
571 DeviceInfo.wSpectraEndBlock =
572 (((ioread32(FlashReg + MIN_MAX_BANK_1) &
573 MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
574 DeviceInfo.wTotalBlocks)
576 (ioread32(FlashReg + MAX_BLK_ADDR_1) &
577 MAX_BLK_ADDR_1__VALUE);
579 DeviceInfo.wTotalBlocks *= totalUsedBanks;
581 if (DeviceInfo.wSpectraEndBlock >=
582 DeviceInfo.wTotalBlocks) {
583 DeviceInfo.wSpectraEndBlock =
584 DeviceInfo.wTotalBlocks - 1;
587 DeviceInfo.wDataBlockNum =
588 DeviceInfo.wSpectraEndBlock -
589 DeviceInfo.wSpectraStartBlock + 1;
590 } else {
591 DeviceInfo.wTotalBlocks *= totalUsedBanks;
592 DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
593 DeviceInfo.wSpectraEndBlock =
594 DeviceInfo.wTotalBlocks - 1;
595 DeviceInfo.wDataBlockNum =
596 DeviceInfo.wSpectraEndBlock -
597 DeviceInfo.wSpectraStartBlock + 1;
599 } else {
600 DeviceInfo.wTotalBlocks *= totalUsedBanks;
601 DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
602 DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
603 DeviceInfo.wDataBlockNum =
604 DeviceInfo.wSpectraEndBlock -
605 DeviceInfo.wSpectraStartBlock + 1;
609 static void dump_device_info(void)
611 nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
612 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
613 DeviceInfo.wDeviceMaker);
614 nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
615 DeviceInfo.wDeviceID);
616 nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
617 DeviceInfo.wDeviceType);
618 nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
619 DeviceInfo.wSpectraStartBlock);
620 nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
621 DeviceInfo.wSpectraEndBlock);
622 nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
623 DeviceInfo.wTotalBlocks);
624 nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
625 DeviceInfo.wPagesPerBlock);
626 nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
627 DeviceInfo.wPageSize);
628 nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
629 DeviceInfo.wPageDataSize);
630 nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
631 DeviceInfo.wPageSpareSize);
632 nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
633 DeviceInfo.wNumPageSpareFlag);
634 nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
635 DeviceInfo.wECCBytesPerSector);
636 nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
637 DeviceInfo.wBlockSize);
638 nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
639 DeviceInfo.wBlockDataSize);
640 nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
641 DeviceInfo.wDataBlockNum);
642 nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
643 DeviceInfo.bPlaneNum);
644 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
645 DeviceInfo.wDeviceMainAreaSize);
646 nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
647 DeviceInfo.wDeviceSpareAreaSize);
648 nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
649 DeviceInfo.wDevicesConnected);
650 nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
651 DeviceInfo.wDeviceWidth);
652 nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
653 DeviceInfo.wHWRevision);
654 nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
655 DeviceInfo.wHWFeatures);
656 nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
657 DeviceInfo.wONFIDevFeatures);
658 nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
659 DeviceInfo.wONFIOptCommands);
660 nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
661 DeviceInfo.wONFITimingMode);
662 nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
663 DeviceInfo.wONFIPgmCacheTimingMode);
664 nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
665 DeviceInfo.MLCDevice ? "Yes" : "No");
666 nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
667 DeviceInfo.wSpareSkipBytes);
668 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
669 DeviceInfo.nBitsInPageNumber);
670 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
671 DeviceInfo.nBitsInPageDataSize);
672 nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
673 DeviceInfo.nBitsInBlockDataSize);
676 u16 NAND_Read_Device_ID(void)
678 u16 status = PASS;
679 u8 no_of_planes;
681 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
682 __FILE__, __LINE__, __func__);
684 iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
685 iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
686 DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
687 DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
688 DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
690 if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
691 ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
692 if (FAIL == get_onfi_nand_para())
693 return FAIL;
694 } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
695 get_samsung_nand_para();
696 } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
697 get_toshiba_nand_para();
698 } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
699 get_hynix_nand_para();
700 } else {
701 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
704 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
705 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
706 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
707 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
708 ioread32(FlashReg + ACC_CLKS),
709 ioread32(FlashReg + RE_2_WE),
710 ioread32(FlashReg + WE_2_RE),
711 ioread32(FlashReg + ADDR_2_DATA),
712 ioread32(FlashReg + RDWR_EN_LO_CNT),
713 ioread32(FlashReg + RDWR_EN_HI_CNT),
714 ioread32(FlashReg + CS_SETUP_CNT));
716 DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
717 DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
719 DeviceInfo.wDeviceMainAreaSize =
720 ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
721 DeviceInfo.wDeviceSpareAreaSize =
722 ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
724 DeviceInfo.wPageDataSize =
725 ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
727 /* Note: When using the Micon 4K NAND device, the controller will report
728 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
729 * And if force set it to 218 bytes, the controller can not work
730 * correctly. So just let it be. But keep in mind that this bug may
731 * cause
732 * other problems in future. - Yunpeng 2008-10-10
734 DeviceInfo.wPageSpareSize =
735 ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
737 DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
739 DeviceInfo.wPageSize =
740 DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
741 DeviceInfo.wBlockSize =
742 DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
743 DeviceInfo.wBlockDataSize =
744 DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
746 DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
747 DeviceInfo.wDeviceType =
748 ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
750 DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
752 DeviceInfo.wSpareSkipBytes =
753 ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
754 DeviceInfo.wDevicesConnected;
756 DeviceInfo.nBitsInPageNumber =
757 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
758 DeviceInfo.nBitsInPageDataSize =
759 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
760 DeviceInfo.nBitsInBlockDataSize =
761 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
763 set_ecc_config();
765 no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
766 NUMBER_OF_PLANES__VALUE;
768 switch (no_of_planes) {
769 case 0:
770 case 1:
771 case 3:
772 case 7:
773 DeviceInfo.bPlaneNum = no_of_planes + 1;
774 break;
775 default:
776 status = FAIL;
777 break;
780 find_valid_banks();
782 detect_partition_feature();
784 dump_device_info();
786 return status;
789 u16 NAND_UnlockArrayAll(void)
791 u64 start_addr, end_addr;
793 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
794 __FILE__, __LINE__, __func__);
796 start_addr = 0;
797 end_addr = ((u64)DeviceInfo.wBlockSize *
798 (DeviceInfo.wTotalBlocks - 1)) >>
799 DeviceInfo.nBitsInPageDataSize;
801 index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
802 index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
804 return PASS;
807 void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
809 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
810 __FILE__, __LINE__, __func__);
812 if (INT_ENABLE)
813 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
814 else
815 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
818 u16 NAND_Erase_Block(u32 block)
820 u16 status = PASS;
821 u64 flash_add;
822 u16 flash_bank;
823 u32 intr_status = 0;
824 u32 intr_status_addresses[4] = {INTR_STATUS0,
825 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
827 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
828 __FILE__, __LINE__, __func__);
830 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
831 * DeviceInfo.wBlockDataSize;
833 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
835 if (block >= DeviceInfo.wTotalBlocks)
836 status = FAIL;
838 if (status == PASS) {
839 intr_status = intr_status_addresses[flash_bank];
841 iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
842 FlashReg + intr_status);
844 index_addr((u32)(MODE_10 | (flash_bank << 24) |
845 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
847 while (!(ioread32(FlashReg + intr_status) &
848 (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
851 if (ioread32(FlashReg + intr_status) &
852 INTR_STATUS0__ERASE_FAIL)
853 status = FAIL;
855 iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
856 FlashReg + intr_status);
859 return status;
862 static u32 Boundary_Check_Block_Page(u32 block, u16 page,
863 u16 page_count)
865 u32 status = PASS;
867 if (block >= DeviceInfo.wTotalBlocks)
868 status = FAIL;
870 if (page + page_count > DeviceInfo.wPagesPerBlock)
871 status = FAIL;
873 return status;
876 u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
877 u16 page_count)
879 u32 status = PASS;
880 u32 i;
881 u64 flash_add;
882 u32 PageSpareSize = DeviceInfo.wPageSpareSize;
883 u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
884 u32 flash_bank;
885 u32 intr_status = 0;
886 u32 intr_status_addresses[4] = {INTR_STATUS0,
887 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
888 u8 *page_spare = buf_read_page_spare;
890 if (block >= DeviceInfo.wTotalBlocks) {
891 printk(KERN_ERR "block too big: %d\n", (int)block);
892 status = FAIL;
895 if (page >= DeviceInfo.wPagesPerBlock) {
896 printk(KERN_ERR "page too big: %d\n", page);
897 status = FAIL;
900 if (page_count > 1) {
901 printk(KERN_ERR "page count too big: %d\n", page_count);
902 status = FAIL;
905 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
906 * DeviceInfo.wBlockDataSize +
907 (u64)page * DeviceInfo.wPageDataSize;
909 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
911 if (status == PASS) {
912 intr_status = intr_status_addresses[flash_bank];
913 iowrite32(ioread32(FlashReg + intr_status),
914 FlashReg + intr_status);
916 index_addr((u32)(MODE_10 | (flash_bank << 24) |
917 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
918 0x41);
919 index_addr((u32)(MODE_10 | (flash_bank << 24) |
920 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
921 0x2000 | page_count);
922 while (!(ioread32(FlashReg + intr_status) &
923 INTR_STATUS0__LOAD_COMP))
926 iowrite32((u32)(MODE_01 | (flash_bank << 24) |
927 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
928 FlashMem);
930 for (i = 0; i < (PageSpareSize / 4); i++)
931 *((u32 *)page_spare + i) =
932 ioread32(FlashMem + 0x10);
934 if (enable_ecc) {
935 for (i = 0; i < spareFlagBytes; i++)
936 read_data[i] =
937 page_spare[PageSpareSize -
938 spareFlagBytes + i];
939 for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
940 read_data[spareFlagBytes + i] =
941 page_spare[i];
942 } else {
943 for (i = 0; i < PageSpareSize; i++)
944 read_data[i] = page_spare[i];
947 index_addr((u32)(MODE_10 | (flash_bank << 24) |
948 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
951 return status;
954 /* No use function. Should be removed later */
955 u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
956 u16 page_count)
958 printk(KERN_ERR
959 "Error! This function (NAND_Write_Page_Spare) should never"
960 " be called!\n");
961 return ERR;
964 /* op value: 0 - DDMA read; 1 - DDMA write */
965 static void ddma_trans(u8 *data, u64 flash_add,
966 u32 flash_bank, int op, u32 numPages)
968 u32 data_addr;
970 /* Map virtual address to bus address for DDMA */
971 data_addr = virt_to_bus(data);
973 index_addr((u32)(MODE_10 | (flash_bank << 24) |
974 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
975 (u16)(2 << 12) | (op << 8) | numPages);
977 index_addr((u32)(MODE_10 | (flash_bank << 24) |
978 ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
979 (u16)(2 << 12) | (2 << 8) | 0);
981 index_addr((u32)(MODE_10 | (flash_bank << 24) |
982 ((u16)(0x0FFFF & data_addr) << 8)),
983 (u16)(2 << 12) | (3 << 8) | 0);
985 index_addr((u32)(MODE_10 | (flash_bank << 24) |
986 (1 << 16) | (0x40 << 8)),
987 (u16)(2 << 12) | (4 << 8) | 0);
990 /* If data in buf are all 0xff, then return 1; otherwise return 0 */
991 static int check_all_1(u8 *buf)
993 int i, j, cnt;
995 for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
996 if (buf[i] != 0xff) {
997 cnt = 0;
998 nand_dbg_print(NAND_DBG_WARN,
999 "the first non-0xff data byte is: %d\n", i);
1000 for (j = i; j < DeviceInfo.wPageDataSize; j++) {
1001 nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
1002 cnt++;
1003 if (cnt > 8)
1004 break;
1006 nand_dbg_print(NAND_DBG_WARN, "\n");
1007 return 0;
1011 return 1;
/*
 * Drain the controller's ECC error log for @bank after an ECC interrupt
 * and repair correctable bit errors in place in @buf.
 *
 * Each pass reads one error record (ERR_PAGE_ADDRx, ECC_ERROR_ADDRESS,
 * ERR_CORRECTION_INFO) until the LAST_ERR_INFO flag terminates the log.
 * Returns PASS when every record was correctable, FAIL if any record was
 * flagged uncorrectable.
 */
static int do_ecc_new(unsigned long bank, u8 *buf,
                u32 block, u16 page)
{
        int status = PASS;
        u16 err_page = 0;
        u16 err_byte;
        u8 err_sect;
        u8 err_dev;
        u16 err_fix_info;
        u16 err_addr;
        u32 ecc_sect_size;
        u8 *err_pos;
        u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
                ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};

        /* One ECC sector spans all devices wired in parallel */
        ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

        do {
                /* Read the next record; field layout per nand_regs.h masks */
                err_page = ioread32(FlashReg + err_page_addr[bank]);
                err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
                err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
                err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
                err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
                err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
                        >> 8);
                if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
                        /* Uncorrectable error: log full context */
                        nand_dbg_print(NAND_DBG_WARN,
                                "%s, Line %d Uncorrectable ECC error "
                                "when read block %d page %d."
                                "PTN_INTR register: 0x%x "
                                "err_page: %d, err_sect: %d, err_byte: %d, "
                                "err_dev: %d, ecc_sect_size: %d, "
                                "err_fix_info: 0x%x\n",
                                __FILE__, __LINE__, block, page,
                                ioread32(FlashReg + PTN_INTR),
                                err_page, err_sect, err_byte, err_dev,
                                ecc_sect_size, (u32)err_fix_info);

                        /* An erased (all-0xff) page commonly fails ECC */
                        if (check_all_1(buf))
                                nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
                                        "All 0xff!\n",
                                        __FILE__, __LINE__);
                        else
                                nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
                                        "Not all 0xff!\n",
                                        __FILE__, __LINE__);
                        status = FAIL;
                } else {
                        nand_dbg_print(NAND_DBG_WARN,
                                "%s, Line %d Found ECC error "
                                "when read block %d page %d."
                                "err_page: %d, err_sect: %d, err_byte: %d, "
                                "err_dev: %d, ecc_sect_size: %d, "
                                "err_fix_info: 0x%x\n",
                                __FILE__, __LINE__, block, page,
                                err_page, err_sect, err_byte, err_dev,
                                ecc_sect_size, (u32)err_fix_info);
                        if (err_byte < ECC_SECTOR_SIZE) {
                                /*
                                 * Map (err_page, err_sect, err_byte,
                                 * err_dev) to a byte in the caller's
                                 * buffer; XOR-ing BYTEMASK flips the
                                 * corrected bits in place.
                                 */
                                err_pos = buf +
                                        (err_page - page) *
                                        DeviceInfo.wPageDataSize +
                                        err_sect * ecc_sect_size +
                                        err_byte *
                                        DeviceInfo.wDevicesConnected +
                                        err_dev;

                                *err_pos ^= err_fix_info &
                                        ERR_CORRECTION_INFO__BYTEMASK;
                        }
                }
        } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

        return status;
}
1089 u16 NAND_Read_Page_Main_Polling(u8 *read_data,
1090 u32 block, u16 page, u16 page_count)
1092 u32 status = PASS;
1093 u64 flash_add;
1094 u32 intr_status = 0;
1095 u32 flash_bank;
1096 u32 intr_status_addresses[4] = {INTR_STATUS0,
1097 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1098 u8 *read_data_l;
1100 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1101 __FILE__, __LINE__, __func__);
1103 status = Boundary_Check_Block_Page(block, page, page_count);
1104 if (status != PASS)
1105 return status;
1107 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1108 * DeviceInfo.wBlockDataSize +
1109 (u64)page * DeviceInfo.wPageDataSize;
1110 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1112 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1114 intr_status = intr_status_addresses[flash_bank];
1115 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1117 if (page_count > 1) {
1118 read_data_l = read_data;
1119 while (page_count > MAX_PAGES_PER_RW) {
1120 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1121 status = NAND_Multiplane_Read(read_data_l,
1122 block, page, MAX_PAGES_PER_RW);
1123 else
1124 status = NAND_Pipeline_Read_Ahead_Polling(
1125 read_data_l, block, page,
1126 MAX_PAGES_PER_RW);
1128 if (status == FAIL)
1129 return status;
1131 read_data_l += DeviceInfo.wPageDataSize *
1132 MAX_PAGES_PER_RW;
1133 page_count -= MAX_PAGES_PER_RW;
1134 page += MAX_PAGES_PER_RW;
1136 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1137 status = NAND_Multiplane_Read(read_data_l,
1138 block, page, page_count);
1139 else
1140 status = NAND_Pipeline_Read_Ahead_Polling(
1141 read_data_l, block, page, page_count);
1143 return status;
1146 iowrite32(1, FlashReg + DMA_ENABLE);
1147 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1150 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1151 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1153 ddma_trans(read_data, flash_add, flash_bank, 0, 1);
1155 if (enable_ecc) {
1156 while (!(ioread32(FlashReg + intr_status) &
1157 (INTR_STATUS0__ECC_TRANSACTION_DONE |
1158 INTR_STATUS0__ECC_ERR)))
1161 if (ioread32(FlashReg + intr_status) &
1162 INTR_STATUS0__ECC_ERR) {
1163 iowrite32(INTR_STATUS0__ECC_ERR,
1164 FlashReg + intr_status);
1165 status = do_ecc_new(flash_bank, read_data,
1166 block, page);
1169 if (ioread32(FlashReg + intr_status) &
1170 INTR_STATUS0__ECC_TRANSACTION_DONE &
1171 INTR_STATUS0__ECC_ERR)
1172 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
1173 INTR_STATUS0__ECC_ERR,
1174 FlashReg + intr_status);
1175 else if (ioread32(FlashReg + intr_status) &
1176 INTR_STATUS0__ECC_TRANSACTION_DONE)
1177 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
1178 FlashReg + intr_status);
1179 else if (ioread32(FlashReg + intr_status) &
1180 INTR_STATUS0__ECC_ERR)
1181 iowrite32(INTR_STATUS0__ECC_ERR,
1182 FlashReg + intr_status);
1183 } else {
1184 while (!(ioread32(FlashReg + intr_status) &
1185 INTR_STATUS0__DMA_CMD_COMP))
1187 iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
1190 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1192 iowrite32(0, FlashReg + DMA_ENABLE);
1193 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1196 return status;
/*
 * Pipelined read of @page_count (>= 2) consecutive pages into
 * @read_data, polling for completion events instead of using the ISR.
 *
 * Issues a MODE_10 read-ahead command (0x42), starts one DMA of
 * NumPages pages, then spins collecting status events until both the
 * ECC-transaction-done and DMA-complete events have been seen (or just
 * DMA-complete when ECC is disabled).  Returns PASS/FAIL.
 */
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
                u32 block, u16 page, u16 page_count)
{
        u32 status = PASS;
        u32 NumPages = page_count;
        u64 flash_add;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        u32 ecc_done_OR_dma_comp;       /* set after the first of the two events */

        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);

        /* Pipelined read only makes sense for 2+ pages */
        if (page_count < 2)
                status = FAIL;

        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                *DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        if (status == PASS) {
                intr_status = intr_status_addresses[flash_bank];
                /* Clear stale status bits by writing back the read value */
                iowrite32(ioread32(FlashReg + intr_status),
                        FlashReg + intr_status);

                iowrite32(1, FlashReg + DMA_ENABLE);
                while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                        ;

                iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

                /* Arm the read-ahead command, then kick off the DMA */
                index_addr((u32)(MODE_10 | (flash_bank << 24) |
                        (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
                ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

                ecc_done_OR_dma_comp = 0;
                while (1) {
                        if (enable_ecc) {
                                /* Busy-wait for any status bit */
                                while (!ioread32(FlashReg + intr_status))
                                        ;

                                if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__ECC_ERR) {
                                        iowrite32(INTR_STATUS0__ECC_ERR,
                                                FlashReg + intr_status);
                                        status = do_ecc_new(flash_bank,
                                                read_data, block, page);
                                } else if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__DMA_CMD_COMP) {
                                        iowrite32(INTR_STATUS0__DMA_CMD_COMP,
                                                FlashReg + intr_status);

                                        if (1 == ecc_done_OR_dma_comp)
                                                break;

                                        ecc_done_OR_dma_comp = 1;
                                } else if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__ECC_TRANSACTION_DONE) {
                                        iowrite32(
                                        INTR_STATUS0__ECC_TRANSACTION_DONE,
                                        FlashReg + intr_status);

                                        if (1 == ecc_done_OR_dma_comp)
                                                break;

                                        ecc_done_OR_dma_comp = 1;
                                }
                        } else {
                                /* No ECC: just wait for DMA completion */
                                while (!(ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__DMA_CMD_COMP))
                                        ;
                                iowrite32(INTR_STATUS0__DMA_CMD_COMP,
                                        FlashReg + intr_status);
                                break;
                        }

                        /* Clear all bits except the three we track */
                        iowrite32((~INTR_STATUS0__ECC_ERR) &
                                (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
                                (~INTR_STATUS0__DMA_CMD_COMP),
                                FlashReg + intr_status);
                }

                iowrite32(ioread32(FlashReg + intr_status),
                        FlashReg + intr_status);

                iowrite32(0, FlashReg + DMA_ENABLE);

                while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                        ;
        }
        return status;
}
/*
 * Interrupt-driven read of the main area of @page_count pages into
 * @read_data.  Multi-page requests are delegated in MAX_PAGES_PER_RW
 * chunks; the single-page case posts one DMA and sleeps on the
 * driver-global completion (info.complete) signalled by the ISR.
 *
 * Returns PASS, FAIL, or ERR on a 10 s completion timeout.
 */
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
                u16 page_count)
{
        u32 status = PASS;
        u64 flash_add;
        u32 intr_status = 0;
        u32 flash_bank;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        int ret;
        u8 *read_data_l;

        nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);
        if (status != PASS)
                return status;

        /* Byte address of the page within its bank */
        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                * DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;
        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        intr_status = intr_status_addresses[flash_bank];
        /* Clear stale status bits */
        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        if (page_count > 1) {
                /* Delegate big requests in MAX_PAGES_PER_RW chunks */
                read_data_l = read_data;
                while (page_count > MAX_PAGES_PER_RW) {
                        if (ioread32(FlashReg + MULTIPLANE_OPERATION))
                                status = NAND_Multiplane_Read(read_data_l,
                                        block, page, MAX_PAGES_PER_RW);
                        else
                                status = NAND_Pipeline_Read_Ahead(
                                        read_data_l, block, page,
                                        MAX_PAGES_PER_RW);

                        if (status == FAIL)
                                return status;

                        read_data_l += DeviceInfo.wPageDataSize *
                                MAX_PAGES_PER_RW;
                        page_count -= MAX_PAGES_PER_RW;
                        page += MAX_PAGES_PER_RW;
                }
                if (ioread32(FlashReg + MULTIPLANE_OPERATION))
                        status = NAND_Multiplane_Read(read_data_l,
                                        block, page, page_count);
                else
                        status = NAND_Pipeline_Read_Ahead(
                                        read_data_l, block, page, page_count);

                return status;
        }

        iowrite32(1, FlashReg + DMA_ENABLE);
        while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        /* Fill the mrst_nand_info structure consumed by the ISR */
        info.state = INT_READ_PAGE_MAIN;
        info.read_data = read_data;
        info.flash_bank = flash_bank;
        info.block = block;
        info.page = page;
        info.ret = PASS;

        ddma_trans(read_data, flash_add, flash_bank, 0, 1);

        iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

        ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
        if (!ret) {
                printk(KERN_ERR "Wait for completion timeout "
                        "in %s, Line %d\n", __FILE__, __LINE__);
                status = ERR;
        } else {
                /* ISR path stored the outcome in info.ret */
                status = info.ret;
        }

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + DMA_ENABLE);
        while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        return status;
}
1395 void Conv_Spare_Data_Log2Phy_Format(u8 *data)
1397 int i;
1398 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1399 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1401 if (enable_ecc) {
1402 for (i = spareFlagBytes - 1; i >= 0; i++)
1403 data[PageSpareSize - spareFlagBytes + i] = data[i];
1407 void Conv_Spare_Data_Phy2Log_Format(u8 *data)
1409 int i;
1410 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1411 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1413 if (enable_ecc) {
1414 for (i = 0; i < spareFlagBytes; i++)
1415 data[i] = data[PageSpareSize - spareFlagBytes + i];
/*
 * In-place conversion of @page_count pages from logical layout
 * (main data, then spare flags) to physical layout (data sectors
 * interleaved with per-sector ECC bytes, skip-byte gap, spare flags at
 * the end).  All moves are done back-to-front so the overlapping
 * in-place copies are safe.  No-op when ECC is disabled.
 */
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
{
        const u32 PageSize = DeviceInfo.wPageSize;
        const u32 PageDataSize = DeviceInfo.wPageDataSize;
        const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
        const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
        const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
        u32 eccSectorSize;
        u32 page_offset;
        int i, j;

        /* One ECC sector spans all devices wired in parallel */
        eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
        if (enable_ecc) {
                /* Process pages last-to-first (buffer grows in place) */
                while (page_count > 0) {
                        page_offset = (page_count - 1) * PageSize;
                        /* Move spare flags after the last ECC sector */
                        j = (DeviceInfo.wPageDataSize / eccSectorSize);
                        for (i = spareFlagBytes - 1; i >= 0; i--)
                                data[page_offset +
                                        (eccSectorSize + eccBytes) * j + i] =
                                        data[page_offset + PageDataSize + i];
                        /* Spread sectors apart to leave room for ECC
                         * bytes; sector 0 is already in place */
                        for (j--; j >= 1; j--) {
                                for (i = eccSectorSize - 1; i >= 0; i--)
                                        data[page_offset +
                                        (eccSectorSize + eccBytes) * j + i] =
                                        data[page_offset +
                                        eccSectorSize * j + i];
                        }
                        /* Open the spare-skip gap after the main data */
                        for (i = (PageSize - spareSkipBytes) - 1;
                                        i >= PageDataSize; i--)
                                data[page_offset + i + spareSkipBytes] =
                                        data[page_offset + i];
                        page_count--;
                }
        }
}
/*
 * Inverse of Conv_Main_Spare_Data_Log2Phy_Format(): convert
 * @page_count pages in place from physical layout (sectors interleaved
 * with ECC bytes, skip gap, spare flags at the end) back to logical
 * layout (contiguous data, then spare flags).  Copies run front-to-back
 * since the buffer shrinks in place.  No-op when ECC is disabled.
 */
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
{
        const u32 PageSize = DeviceInfo.wPageSize;
        const u32 PageDataSize = DeviceInfo.wPageDataSize;
        const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
        const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
        const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
        u32 eccSectorSize;
        u32 page_offset;
        int i, j;

        /* One ECC sector spans all devices wired in parallel */
        eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
        if (enable_ecc) {
                while (page_count > 0) {
                        page_offset = (page_count - 1) * PageSize;
                        /* Close the spare-skip gap */
                        for (i = PageDataSize;
                             i < PageSize - spareSkipBytes;
                             i++)
                                data[page_offset + i] =
                                        data[page_offset + i +
                                        spareSkipBytes];
                        /* Compact the data sectors (sector 0 is already
                         * in place, so start at j = 1) */
                        for (j = 1;
                             j < DeviceInfo.wPageDataSize / eccSectorSize;
                             j++) {
                                for (i = 0; i < eccSectorSize; i++)
                                        data[page_offset +
                                                eccSectorSize * j + i] =
                                                data[page_offset +
                                                (eccSectorSize + eccBytes) * j
                                                + i];
                        }
                        /* j now equals the sector count: the spare
                         * flags sit right after the last ECC sector */
                        for (i = 0; i < spareFlagBytes; i++)
                                data[page_offset + PageDataSize + i] =
                                        data[page_offset +
                                        (eccSectorSize + eccBytes) * j + i];
                        page_count--;
                }
        }
}
/* Un-tested function */
/*
 * Multi-plane read of @page_count pages into @read_data, polled.
 * Enables MULTIPLANE_OPERATION around a MODE_10 (0x42) read-ahead plus
 * one DMA of NumPages pages, then collects status events until both
 * ECC-transaction-done and DMA-complete are seen (or just DMA-complete
 * without ECC).  Returns PASS/FAIL.
 */
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
                u16 page_count)
{
        u32 status = PASS;
        u32 NumPages = page_count;
        u64 flash_add;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        u32 ecc_done_OR_dma_comp;       /* set after the first of the two events */

        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);

        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                * DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        if (status == PASS) {
                intr_status = intr_status_addresses[flash_bank];
                /* Clear stale status bits */
                iowrite32(ioread32(FlashReg + intr_status),
                        FlashReg + intr_status);

                iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
                iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

                iowrite32(1, FlashReg + DMA_ENABLE);
                while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                        ;

                /* Arm read-ahead, then kick off the DMA */
                index_addr((u32)(MODE_10 | (flash_bank << 24) |
                        (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
                ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

                ecc_done_OR_dma_comp = 0;
                while (1) {
                        if (enable_ecc) {
                                /* Busy-wait for any status bit */
                                while (!ioread32(FlashReg + intr_status))
                                        ;

                                if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__ECC_ERR) {
                                        iowrite32(INTR_STATUS0__ECC_ERR,
                                                FlashReg + intr_status);
                                        status = do_ecc_new(flash_bank,
                                                read_data, block, page);
                                } else if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__DMA_CMD_COMP) {
                                        iowrite32(INTR_STATUS0__DMA_CMD_COMP,
                                                FlashReg + intr_status);

                                        if (1 == ecc_done_OR_dma_comp)
                                                break;

                                        ecc_done_OR_dma_comp = 1;
                                } else if (ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__ECC_TRANSACTION_DONE) {
                                        iowrite32(
                                        INTR_STATUS0__ECC_TRANSACTION_DONE,
                                        FlashReg + intr_status);

                                        if (1 == ecc_done_OR_dma_comp)
                                                break;

                                        ecc_done_OR_dma_comp = 1;
                                }
                        } else {
                                /* No ECC: just wait for DMA completion */
                                while (!(ioread32(FlashReg + intr_status) &
                                        INTR_STATUS0__DMA_CMD_COMP))
                                        ;
                                iowrite32(INTR_STATUS0__DMA_CMD_COMP,
                                        FlashReg + intr_status);
                                break;
                        }

                        /* Clear all bits except the three we track */
                        iowrite32((~INTR_STATUS0__ECC_ERR) &
                                (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
                                (~INTR_STATUS0__DMA_CMD_COMP),
                                FlashReg + intr_status);
                }

                iowrite32(ioread32(FlashReg + intr_status),
                        FlashReg + intr_status);

                iowrite32(0, FlashReg + DMA_ENABLE);

                while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                        ;

                iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
        }

        return status;
}
/*
 * Interrupt-driven pipelined read of @page_count (>= 2) consecutive
 * pages into @read_data.  Arms a MODE_10 (0x42) read-ahead and one DMA
 * of NumPages pages, then sleeps on info.complete which the ISR
 * signals.  Returns PASS, FAIL (boundary / page_count < 2), or ERR on
 * a 10 s completion timeout.
 */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
                u16 page, u16 page_count)
{
        u32 status = PASS;
        u32 NumPages = page_count;
        u64 flash_add;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        int ret;

        nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);

        /* Pipelined read only makes sense for 2+ pages */
        if (page_count < 2)
                status = FAIL;

        if (status != PASS)
                return status;

        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                *DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        intr_status = intr_status_addresses[flash_bank];
        /* Clear stale status bits */
        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(1, FlashReg + DMA_ENABLE);
        while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        /* Fill the mrst_nand_info structure consumed by the ISR */
        info.state = INT_PIPELINE_READ_AHEAD;
        info.read_data = read_data;
        info.flash_bank = flash_bank;
        info.block = block;
        info.page = page;
        info.ret = PASS;

        index_addr((u32)(MODE_10 | (flash_bank << 24) |
                (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

        ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

        iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

        ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
        if (!ret) {
                printk(KERN_ERR "Wait for completion timeout "
                        "in %s, Line %d\n", __FILE__, __LINE__);
                status = ERR;
        } else {
                /* ISR path stored the outcome in info.ret */
                status = info.ret;
        }

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + DMA_ENABLE);

        while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        return status;
}
/*
 * Interrupt-driven program of the main area of @page_count pages from
 * @write_data.  Multi-page requests are delegated in MAX_PAGES_PER_RW
 * chunks to NAND_Multiplane_Write()/NAND_Pipeline_Write_Ahead(); the
 * single-page case posts one DMA write and sleeps on info.complete.
 *
 * Returns PASS, FAIL, or ERR on a 10 s completion timeout.
 */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
                u16 page_count)
{
        u32 status = PASS;
        u64 flash_add;
        u32 intr_status = 0;
        u32 flash_bank;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        int ret;
        u8 *write_data_l;

        nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);
        if (status != PASS)
                return status;

        /* Byte address of the page within its bank */
        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                * DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        intr_status = intr_status_addresses[flash_bank];

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        /* Clear any stale program-status bits for this bank */
        iowrite32(INTR_STATUS0__PROGRAM_COMP |
                INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);

        if (page_count > 1) {
                /* Delegate big requests in MAX_PAGES_PER_RW chunks */
                write_data_l = write_data;
                while (page_count > MAX_PAGES_PER_RW) {
                        if (ioread32(FlashReg + MULTIPLANE_OPERATION))
                                status = NAND_Multiplane_Write(write_data_l,
                                        block, page, MAX_PAGES_PER_RW);
                        else
                                status = NAND_Pipeline_Write_Ahead(
                                        write_data_l, block, page,
                                        MAX_PAGES_PER_RW);
                        if (status == FAIL)
                                return status;

                        write_data_l += DeviceInfo.wPageDataSize *
                                MAX_PAGES_PER_RW;
                        page_count -= MAX_PAGES_PER_RW;
                        page += MAX_PAGES_PER_RW;
                }
                if (ioread32(FlashReg + MULTIPLANE_OPERATION))
                        status = NAND_Multiplane_Write(write_data_l,
                                block, page, page_count);
                else
                        status = NAND_Pipeline_Write_Ahead(write_data_l,
                                block, page, page_count);

                return status;
        }

        iowrite32(1, FlashReg + DMA_ENABLE);
        while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        /* Fill the mrst_nand_info structure consumed by the ISR */
        info.state = INT_WRITE_PAGE_MAIN;
        info.write_data = write_data;
        info.flash_bank = flash_bank;
        info.block = block;
        info.page = page;
        info.ret = PASS;

        ddma_trans(write_data, flash_add, flash_bank, 1, 1);

        iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

        ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
        if (!ret) {
                printk(KERN_ERR "Wait for completion timeout "
                        "in %s, Line %d\n", __FILE__, __LINE__);
                status = ERR;
        } else {
                /* ISR path stored the outcome in info.ret */
                status = info.ret;
        }

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + DMA_ENABLE);
        while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
                ;

        return status;
}
1768 void NAND_ECC_Ctrl(int enable)
1770 if (enable) {
1771 nand_dbg_print(NAND_DBG_WARN,
1772 "Will enable ECC in %s, Line %d, Function: %s\n",
1773 __FILE__, __LINE__, __func__);
1774 iowrite32(1, FlashReg + ECC_ENABLE);
1775 enable_ecc = 1;
1776 } else {
1777 nand_dbg_print(NAND_DBG_WARN,
1778 "Will disable ECC in %s, Line %d, Function: %s\n",
1779 __FILE__, __LINE__, __func__);
1780 iowrite32(0, FlashReg + ECC_ENABLE);
1781 enable_ecc = 0;
/*
 * Program main + spare area of @page_count pages from @write_data using
 * PIO (MODE_01 writes through FlashMem), no DMA.  With ECC enabled the
 * logical buffer is first rearranged into the physical on-flash layout
 * (sectors interleaved with ECC bytes, skip gap, spare flags last) in
 * the driver-global staging buffer buf_write_page_main_spare.
 *
 * Returns PASS, or FAIL on boundary error / PROGRAM_FAIL status.
 */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
                u16 page, u16 page_count)
{
        u32 status = PASS;
        u32 i, j, page_num = 0;         /* page_num counts iterations only */
        u32 PageSize = DeviceInfo.wPageSize;
        u32 PageDataSize = DeviceInfo.wPageDataSize;
        u32 eccBytes = DeviceInfo.wECCBytesPerSector;
        u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
        u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
        u64 flash_add;
        u32 eccSectorSize;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        u8 *page_main_spare = buf_write_page_main_spare;

        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        /* One ECC sector spans all devices wired in parallel */
        eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

        status = Boundary_Check_Block_Page(block, page, page_count);

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        if (status == PASS) {
                intr_status = intr_status_addresses[flash_bank];

                iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

                while ((status != FAIL) && (page_count > 0)) {
                        /*
                         * NOTE(review): `page` is never advanced in this
                         * loop although write_data is, so flash_add is
                         * identical every iteration — looks like every
                         * pass programs the same flash page; confirm
                         * intent before relying on multi-page writes
                         * through this path (function marked un-tested
                         * company-wide).
                         */
                        flash_add = (u64)(block %
                                (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
                                DeviceInfo.wBlockDataSize +
                                (u64)page * DeviceInfo.wPageDataSize;

                        /* Clear stale status bits */
                        iowrite32(ioread32(FlashReg + intr_status),
                                FlashReg + intr_status);

                        iowrite32((u32)(MODE_01 | (flash_bank << 24) |
                                (flash_add >>
                                DeviceInfo.nBitsInPageDataSize)),
                                FlashMem);

                        if (enable_ecc) {
                                /* Interleave data sectors with their
                                 * ECC bytes in the staging buffer */
                                for (j = 0;
                                j < DeviceInfo.wPageDataSize / eccSectorSize;
                                j++) {
                                        for (i = 0; i < eccSectorSize; i++)
                                                page_main_spare[(eccSectorSize +
                                                        eccBytes) * j +
                                                        i] =
                                                        write_data[eccSectorSize *
                                                        j + i];

                                        for (i = 0; i < eccBytes; i++)
                                                page_main_spare[(eccSectorSize +
                                                        eccBytes) * j +
                                                        eccSectorSize +
                                                        i] =
                                                        write_data[PageDataSize +
                                                        spareFlagBytes +
                                                        eccBytes * j +
                                                        i];
                                }

                                /* Spare flags follow the last sector */
                                for (i = 0; i < spareFlagBytes; i++)
                                        page_main_spare[(eccSectorSize +
                                                eccBytes) * j + i] =
                                                write_data[PageDataSize + i];

                                /* Open the skip-byte gap ... */
                                for (i = PageSize - 1; i >= PageDataSize +
                                                spareSkipBytes; i--)
                                        page_main_spare[i] = page_main_spare[i -
                                                spareSkipBytes];

                                /* ... and fill it with 0xff */
                                for (i = PageDataSize; i < PageDataSize +
                                                spareSkipBytes; i++)
                                        page_main_spare[i] = 0xff;

                                /* PIO the staged page, one word at a time */
                                for (i = 0; i < PageSize / 4; i++)
                                        iowrite32(
                                                *((u32 *)page_main_spare + i),
                                                FlashMem + 0x10);
                        } else {
                                /* No ECC: stream the caller's buffer as-is */
                                for (i = 0; i < PageSize / 4; i++)
                                        iowrite32(*((u32 *)write_data + i),
                                                FlashMem + 0x10);
                        }

                        /* Busy-wait for program completion or failure */
                        while (!(ioread32(FlashReg + intr_status) &
                                (INTR_STATUS0__PROGRAM_COMP |
                                INTR_STATUS0__PROGRAM_FAIL)))
                                ;

                        if (ioread32(FlashReg + intr_status) &
                                INTR_STATUS0__PROGRAM_FAIL)
                                status = FAIL;

                        iowrite32(ioread32(FlashReg + intr_status),
                                FlashReg + intr_status);

                        page_num++;
                        page_count--;
                        write_data += PageSize;
                }

                iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
        }

        return status;
}
1902 u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
1903 u16 page_count)
1905 u32 status = PASS;
1906 u32 i, j;
1907 u64 flash_add = 0;
1908 u32 PageSize = DeviceInfo.wPageSize;
1909 u32 PageDataSize = DeviceInfo.wPageDataSize;
1910 u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1911 u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1912 u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1913 u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
1914 u32 eccSectorSize;
1915 u32 flash_bank;
1916 u32 intr_status = 0;
1917 u8 *read_data_l = read_data;
1918 u32 intr_status_addresses[4] = {INTR_STATUS0,
1919 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1920 u8 *page_main_spare = buf_read_page_main_spare;
1922 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1923 __FILE__, __LINE__, __func__);
1925 eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1927 status = Boundary_Check_Block_Page(block, page, page_count);
1929 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1931 if (status == PASS) {
1932 intr_status = intr_status_addresses[flash_bank];
1934 iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
1936 iowrite32(ioread32(FlashReg + intr_status),
1937 FlashReg + intr_status);
1939 while ((status != FAIL) && (page_count > 0)) {
1940 flash_add = (u64)(block %
1941 (DeviceInfo.wTotalBlocks / totalUsedBanks))
1942 * DeviceInfo.wBlockDataSize +
1943 (u64)page * DeviceInfo.wPageDataSize;
1945 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1946 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
1947 0x43);
1948 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1949 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
1950 0x2000 | page_count);
1952 while (!(ioread32(FlashReg + intr_status) &
1953 INTR_STATUS0__LOAD_COMP))
1956 iowrite32((u32)(MODE_01 | (flash_bank << 24) |
1957 (flash_add >>
1958 DeviceInfo.nBitsInPageDataSize)),
1959 FlashMem);
1961 for (i = 0; i < PageSize / 4; i++)
1962 *(((u32 *)page_main_spare) + i) =
1963 ioread32(FlashMem + 0x10);
1965 if (enable_ecc) {
1966 for (i = PageDataSize; i < PageSize -
1967 spareSkipBytes; i++)
1968 page_main_spare[i] = page_main_spare[i +
1969 spareSkipBytes];
1971 for (j = 0;
1972 j < DeviceInfo.wPageDataSize / eccSectorSize;
1973 j++) {
1975 for (i = 0; i < eccSectorSize; i++)
1976 read_data_l[eccSectorSize * j +
1977 i] =
1978 page_main_spare[
1979 (eccSectorSize +
1980 eccBytes) * j + i];
1982 for (i = 0; i < eccBytes; i++)
1983 read_data_l[PageDataSize +
1984 spareFlagBytes +
1985 eccBytes * j + i] =
1986 page_main_spare[
1987 (eccSectorSize +
1988 eccBytes) * j +
1989 eccSectorSize + i];
1992 for (i = 0; i < spareFlagBytes; i++)
1993 read_data_l[PageDataSize + i] =
1994 page_main_spare[(eccSectorSize +
1995 eccBytes) * j + i];
1996 } else {
1997 for (i = 0; i < (PageDataSize + PageSpareSize);
1998 i++)
1999 read_data_l[i] = page_main_spare[i];
2003 if (enable_ecc) {
2004 while (!(ioread32(FlashReg + intr_status) &
2005 (INTR_STATUS0__ECC_TRANSACTION_DONE |
2006 INTR_STATUS0__ECC_ERR)))
2009 if (ioread32(FlashReg + intr_status) &
2010 INTR_STATUS0__ECC_ERR) {
2011 iowrite32(INTR_STATUS0__ECC_ERR,
2012 FlashReg + intr_status);
2013 status = do_ecc_new(flash_bank,
2014 read_data, block, page);
2017 if (ioread32(FlashReg + intr_status) &
2018 INTR_STATUS0__ECC_TRANSACTION_DONE &
2019 INTR_STATUS0__ECC_ERR) {
2020 iowrite32(INTR_STATUS0__ECC_ERR |
2021 INTR_STATUS0__ECC_TRANSACTION_DONE,
2022 FlashReg + intr_status);
2023 } else if (ioread32(FlashReg + intr_status) &
2024 INTR_STATUS0__ECC_TRANSACTION_DONE) {
2025 iowrite32(
2026 INTR_STATUS0__ECC_TRANSACTION_DONE,
2027 FlashReg + intr_status);
2028 } else if (ioread32(FlashReg + intr_status) &
2029 INTR_STATUS0__ECC_ERR) {
2030 iowrite32(INTR_STATUS0__ECC_ERR,
2031 FlashReg + intr_status);
2035 page++;
2036 page_count--;
2037 read_data_l += PageSize;
2041 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2043 index_addr((u32)(MODE_10 | (flash_bank << 24) |
2044 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2046 return status;
/*
 * Interrupt-driven pipelined program of @page_count (>= 2) consecutive
 * pages from @write_data.  Arms a MODE_10 (0x42) command and one DMA
 * write of NumPages pages, then sleeps on info.complete which the ISR
 * signals.  Returns PASS, FAIL (boundary / page_count < 2), or ERR on
 * a 10 s completion timeout.
 */
u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
                u16 page, u16 page_count)
{
        u16 status = PASS;
        u32 NumPages = page_count;
        u64 flash_add;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        int ret;

        nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);

        /* Pipelined write only makes sense for 2+ pages */
        if (page_count < 2)
                status = FAIL;

        if (status != PASS)
                return status;

        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                * DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        intr_status = intr_status_addresses[flash_bank];
        /* Clear stale status bits */
        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(1, FlashReg + DMA_ENABLE);
        while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        /* Fill the mrst_nand_info structure consumed by the ISR */
        info.state = INT_PIPELINE_WRITE_AHEAD;
        info.write_data = write_data;
        info.flash_bank = flash_bank;
        info.block = block;
        info.page = page;
        info.ret = PASS;

        index_addr((u32)(MODE_10 | (flash_bank << 24) |
                (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

        ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

        iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

        ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
        if (!ret) {
                printk(KERN_ERR "Wait for completion timeout "
                        "in %s, Line %d\n", __FILE__, __LINE__);
                status = ERR;
        } else {
                /* ISR path stored the outcome in info.ret */
                status = info.ret;
        }

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + DMA_ENABLE);
        while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        return status;
}
/* Un-tested function */
/*
 * Multi-plane program of @page_count pages from @write_data, polled.
 * Enables MULTIPLANE_OPERATION around a MODE_10 (0x42) command and one
 * DMA write, then spins on the status register: any PROGRAM_FAIL seen
 * before DMA completion latches FAIL (via status2) even if later events
 * succeed.  Returns PASS/FAIL.
 */
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
                u16 page_count)
{
        u16 status = PASS;
        u32 NumPages = page_count;
        u64 flash_add;
        u32 flash_bank;
        u32 intr_status = 0;
        u32 intr_status_addresses[4] = {INTR_STATUS0,
                INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
        u16 status2 = PASS;     /* sticky failure latch */
        u32 t;

        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                __FILE__, __LINE__, __func__);

        status = Boundary_Check_Block_Page(block, page, page_count);
        if (status != PASS)
                return status;

        flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
                * DeviceInfo.wBlockDataSize +
                (u64)page * DeviceInfo.wPageDataSize;

        flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

        intr_status = intr_status_addresses[flash_bank];
        /* Clear stale status bits */
        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
        iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

        iowrite32(1, FlashReg + DMA_ENABLE);
        while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

        index_addr((u32)(MODE_10 | (flash_bank << 24) |
                (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

        ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

        while (1) {
                /* Busy-wait for any status bit */
                while (!ioread32(FlashReg + intr_status))
                        ;

                if (ioread32(FlashReg + intr_status) &
                        INTR_STATUS0__DMA_CMD_COMP) {
                        iowrite32(INTR_STATUS0__DMA_CMD_COMP,
                                FlashReg + intr_status);
                        status = PASS;
                        /* Any earlier PROGRAM_FAIL wins */
                        if (status2 == FAIL)
                                status = FAIL;
                        break;
                } else if (ioread32(FlashReg + intr_status) &
                        INTR_STATUS0__PROGRAM_FAIL) {
                        status2 = FAIL;
                        status = FAIL;
                        t = ioread32(FlashReg + intr_status) &
                                INTR_STATUS0__PROGRAM_FAIL;
                        iowrite32(t, FlashReg + intr_status);
                } else {
                        /* Clear everything except the two bits we track */
                        iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
                                (~INTR_STATUS0__DMA_CMD_COMP),
                                FlashReg + intr_status);
                }
        }

        iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

        iowrite32(0, FlashReg + DMA_ENABLE);

        while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
                ;

        iowrite32(0, FlashReg + MULTIPLANE_OPERATION);

        return status;
}
2203 #if CMD_DMA
2204 static irqreturn_t cdma_isr(int irq, void *dev_id)
2206 struct mrst_nand_info *dev = dev_id;
2207 int first_failed_cmd;
2209 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2210 __FILE__, __LINE__, __func__);
2212 if (!is_cdma_interrupt())
2213 return IRQ_NONE;
2215 /* Disable controller interrupts */
2216 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2217 GLOB_FTL_Event_Status(&first_failed_cmd);
2218 complete(&dev->complete);
2220 return IRQ_HANDLED;
2222 #else
2223 static void handle_nand_int_read(struct mrst_nand_info *dev)
2225 u32 intr_status_addresses[4] = {INTR_STATUS0,
2226 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
2227 u32 intr_status;
2228 u32 ecc_done_OR_dma_comp = 0;
2230 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2231 __FILE__, __LINE__, __func__);
2233 dev->ret = PASS;
2234 intr_status = intr_status_addresses[dev->flash_bank];
2236 while (1) {
2237 if (enable_ecc) {
2238 if (ioread32(FlashReg + intr_status) &
2239 INTR_STATUS0__ECC_ERR) {
2240 iowrite32(INTR_STATUS0__ECC_ERR,
2241 FlashReg + intr_status);
2242 dev->ret = do_ecc_new(dev->flash_bank,
2243 dev->read_data,
2244 dev->block, dev->page);
2245 } else if (ioread32(FlashReg + intr_status) &
2246 INTR_STATUS0__DMA_CMD_COMP) {
2247 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
2248 FlashReg + intr_status);
2249 if (1 == ecc_done_OR_dma_comp)
2250 break;
2251 ecc_done_OR_dma_comp = 1;
2252 } else if (ioread32(FlashReg + intr_status) &
2253 INTR_STATUS0__ECC_TRANSACTION_DONE) {
2254 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
2255 FlashReg + intr_status);
2256 if (1 == ecc_done_OR_dma_comp)
2257 break;
2258 ecc_done_OR_dma_comp = 1;
2260 } else {
2261 if (ioread32(FlashReg + intr_status) &
2262 INTR_STATUS0__DMA_CMD_COMP) {
2263 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
2264 FlashReg + intr_status);
2265 break;
2266 } else {
2267 printk(KERN_ERR "Illegal INTS "
2268 "(offset addr 0x%x) value: 0x%x\n",
2269 intr_status,
2270 ioread32(FlashReg + intr_status));
2274 iowrite32((~INTR_STATUS0__ECC_ERR) &
2275 (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
2276 (~INTR_STATUS0__DMA_CMD_COMP),
2277 FlashReg + intr_status);
2281 static void handle_nand_int_write(struct mrst_nand_info *dev)
2283 u32 intr_status;
2284 u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
2285 INTR_STATUS2, INTR_STATUS3};
2286 int status = PASS;
2288 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2289 __FILE__, __LINE__, __func__);
2291 dev->ret = PASS;
2292 intr_status = intr[dev->flash_bank];
2294 while (1) {
2295 while (!ioread32(FlashReg + intr_status))
2298 if (ioread32(FlashReg + intr_status) &
2299 INTR_STATUS0__DMA_CMD_COMP) {
2300 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
2301 FlashReg + intr_status);
2302 if (FAIL == status)
2303 dev->ret = FAIL;
2304 break;
2305 } else if (ioread32(FlashReg + intr_status) &
2306 INTR_STATUS0__PROGRAM_FAIL) {
2307 status = FAIL;
2308 iowrite32(INTR_STATUS0__PROGRAM_FAIL,
2309 FlashReg + intr_status);
2310 } else {
2311 iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
2312 (~INTR_STATUS0__DMA_CMD_COMP),
2313 FlashReg + intr_status);
2318 static irqreturn_t ddma_isr(int irq, void *dev_id)
2320 struct mrst_nand_info *dev = dev_id;
2321 u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
2322 u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
2323 INTR_STATUS2, INTR_STATUS3};
2325 int_mask = INTR_STATUS0__DMA_CMD_COMP |
2326 INTR_STATUS0__ECC_TRANSACTION_DONE |
2327 INTR_STATUS0__ECC_ERR |
2328 INTR_STATUS0__PROGRAM_FAIL |
2329 INTR_STATUS0__ERASE_FAIL;
2331 ints0 = ioread32(FlashReg + INTR_STATUS0);
2332 ints1 = ioread32(FlashReg + INTR_STATUS1);
2333 ints2 = ioread32(FlashReg + INTR_STATUS2);
2334 ints3 = ioread32(FlashReg + INTR_STATUS3);
2336 ints_offset = intr[dev->flash_bank];
2338 nand_dbg_print(NAND_DBG_DEBUG,
2339 "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
2340 "DMA_INTR: 0x%x, "
2341 "dev->state: 0x%x, dev->flash_bank: %d\n",
2342 ints0, ints1, ints2, ints3,
2343 ioread32(FlashReg + DMA_INTR),
2344 dev->state, dev->flash_bank);
2346 if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
2347 iowrite32(ints0, FlashReg + INTR_STATUS0);
2348 iowrite32(ints1, FlashReg + INTR_STATUS1);
2349 iowrite32(ints2, FlashReg + INTR_STATUS2);
2350 iowrite32(ints3, FlashReg + INTR_STATUS3);
2351 nand_dbg_print(NAND_DBG_WARN,
2352 "ddma_isr: Invalid interrupt for NAND controller. "
2353 "Ignore it\n");
2354 return IRQ_NONE;
2357 switch (dev->state) {
2358 case INT_READ_PAGE_MAIN:
2359 case INT_PIPELINE_READ_AHEAD:
2360 /* Disable controller interrupts */
2361 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2362 handle_nand_int_read(dev);
2363 break;
2364 case INT_WRITE_PAGE_MAIN:
2365 case INT_PIPELINE_WRITE_AHEAD:
2366 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2367 handle_nand_int_write(dev);
2368 break;
2369 default:
2370 printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
2371 dev->state);
2372 return IRQ_NONE;
2375 dev->state = INT_IDLE_STATE;
2376 complete(&dev->complete);
2377 return IRQ_HANDLED;
2379 #endif
2381 static const struct pci_device_id nand_pci_ids[] = {
2383 .vendor = 0x8086,
2384 .device = 0x0809,
2385 .subvendor = PCI_ANY_ID,
2386 .subdevice = PCI_ANY_ID,
2388 { /* end: all zeroes */ }
2391 static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2393 int ret = -ENODEV;
2394 unsigned long csr_base;
2395 unsigned long csr_len;
2396 struct mrst_nand_info *pndev = &info;
2398 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2399 __FILE__, __LINE__, __func__);
2401 ret = pci_enable_device(dev);
2402 if (ret) {
2403 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
2404 return ret;
2407 pci_set_master(dev);
2408 pndev->dev = dev;
2410 csr_base = pci_resource_start(dev, 0);
2411 if (!csr_base) {
2412 printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
2413 return -ENODEV;
2416 csr_len = pci_resource_len(dev, 0);
2417 if (!csr_len) {
2418 printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
2419 return -ENODEV;
2422 ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
2423 if (ret) {
2424 printk(KERN_ERR "Spectra: Unable to request "
2425 "memory region\n");
2426 goto failed_req_csr;
2429 pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
2430 if (!pndev->ioaddr) {
2431 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
2432 ret = -ENOMEM;
2433 goto failed_remap_csr;
2435 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
2436 csr_base, pndev->ioaddr, csr_len);
2438 init_completion(&pndev->complete);
2439 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
2441 #if CMD_DMA
2442 if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
2443 SPECTRA_NAND_NAME, &info)) {
2444 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2445 ret = -ENODEV;
2446 iounmap(pndev->ioaddr);
2447 goto failed_remap_csr;
2449 #else
2450 if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
2451 SPECTRA_NAND_NAME, &info)) {
2452 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2453 ret = -ENODEV;
2454 iounmap(pndev->ioaddr);
2455 goto failed_remap_csr;
2457 #endif
2459 pci_set_drvdata(dev, pndev);
2461 return 0;
2463 failed_remap_csr:
2464 pci_release_regions(dev);
2465 failed_req_csr:
2467 return ret;
2470 static void nand_pci_remove(struct pci_dev *dev)
2472 struct mrst_nand_info *pndev = pci_get_drvdata(dev);
2474 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2475 __FILE__, __LINE__, __func__);
2477 #if CMD_DMA
2478 free_irq(dev->irq, pndev);
2479 #endif
2480 iounmap(pndev->ioaddr);
2481 pci_release_regions(dev);
2482 pci_disable_device(dev);
2485 MODULE_DEVICE_TABLE(pci, nand_pci_ids);
2487 static struct pci_driver nand_pci_driver = {
2488 .name = SPECTRA_NAND_NAME,
2489 .id_table = nand_pci_ids,
2490 .probe = nand_pci_probe,
2491 .remove = nand_pci_remove,
2494 int NAND_Flash_Init(void)
2496 int retval;
2497 u32 int_mask;
2499 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2500 __FILE__, __LINE__, __func__);
2502 FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
2503 GLOB_HWCTL_REG_SIZE);
2504 if (!FlashReg) {
2505 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2506 return -ENOMEM;
2508 nand_dbg_print(NAND_DBG_WARN,
2509 "Spectra: Remapped reg base address: "
2510 "0x%p, len: %d\n",
2511 FlashReg, GLOB_HWCTL_REG_SIZE);
2513 FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
2514 GLOB_HWCTL_MEM_SIZE);
2515 if (!FlashMem) {
2516 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2517 iounmap(FlashReg);
2518 return -ENOMEM;
2520 nand_dbg_print(NAND_DBG_WARN,
2521 "Spectra: Remapped flash base address: "
2522 "0x%p, len: %d\n",
2523 (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
2525 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
2526 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
2527 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
2528 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
2529 ioread32(FlashReg + ACC_CLKS),
2530 ioread32(FlashReg + RE_2_WE),
2531 ioread32(FlashReg + WE_2_RE),
2532 ioread32(FlashReg + ADDR_2_DATA),
2533 ioread32(FlashReg + RDWR_EN_LO_CNT),
2534 ioread32(FlashReg + RDWR_EN_HI_CNT),
2535 ioread32(FlashReg + CS_SETUP_CNT));
2537 NAND_Flash_Reset();
2539 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2541 #if CMD_DMA
2542 info.pcmds_num = 0;
2543 info.flash_bank = 0;
2544 info.cdma_num = 0;
2545 int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
2546 DMA_INTR__DESC_COMP_CHANNEL1 |
2547 DMA_INTR__DESC_COMP_CHANNEL2 |
2548 DMA_INTR__DESC_COMP_CHANNEL3 |
2549 DMA_INTR__MEMCOPY_DESC_COMP);
2550 iowrite32(int_mask, FlashReg + DMA_INTR_EN);
2551 iowrite32(0xFFFF, FlashReg + DMA_INTR);
2553 int_mask = (INTR_STATUS0__ECC_ERR |
2554 INTR_STATUS0__PROGRAM_FAIL |
2555 INTR_STATUS0__ERASE_FAIL);
2556 #else
2557 int_mask = INTR_STATUS0__DMA_CMD_COMP |
2558 INTR_STATUS0__ECC_TRANSACTION_DONE |
2559 INTR_STATUS0__ECC_ERR |
2560 INTR_STATUS0__PROGRAM_FAIL |
2561 INTR_STATUS0__ERASE_FAIL;
2562 #endif
2563 iowrite32(int_mask, FlashReg + INTR_EN0);
2564 iowrite32(int_mask, FlashReg + INTR_EN1);
2565 iowrite32(int_mask, FlashReg + INTR_EN2);
2566 iowrite32(int_mask, FlashReg + INTR_EN3);
2568 /* Clear all status bits */
2569 iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
2570 iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
2571 iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
2572 iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
2574 iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
2575 iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
2577 /* Should set value for these registers when init */
2578 iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
2579 iowrite32(1, FlashReg + ECC_ENABLE);
2580 enable_ecc = 1;
2582 retval = pci_register_driver(&nand_pci_driver);
2583 if (retval)
2584 return -ENOMEM;
2586 return PASS;
2589 /* Free memory */
2590 int nand_release_spectra(void)
2592 pci_unregister_driver(&nand_pci_driver);
2593 iounmap(FlashMem);
2594 iounmap(FlashReg);
2596 return 0;