GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / spectra / flash.c
blob577c0920304cc7d0c7fd9dceb12e773ef5596ab2
1 /*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/fs.h>
21 #include <linux/slab.h>
23 #include "flash.h"
24 #include "ffsdefs.h"
25 #include "lld.h"
26 #include "lld_nand.h"
27 #if CMD_DMA
28 #include "lld_cdma.h"
29 #endif
31 #define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
32 #define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
33 DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
35 #define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
36 BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
38 #define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
40 #define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
41 BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
43 #define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
45 #if DEBUG_BNDRY
/* Debug-only bounds check: log an error when a channel index reaches or
 * exceeds its limit, reporting the caller's file/line and an extra info
 * value.  Despite the "Aborting" wording it only printk()s here.
 * NOTE(review): this snapshot is missing lines (e.g. the function's
 * opening brace, blob line 48) dropped by the web extraction. */
46 void debug_boundary_lineno_error(int chnl, int limit, int no,
47 int lineno, char *filename)
49 if (chnl >= limit)
50 printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
51 "at %s:%d. Other info:%d. Aborting...\n",
52 chnl, limit, filename, lineno, no);
54 /* static int globalmemsize; */
55 #endif
57 static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
58 static int FTL_Cache_Read(u64 dwPageAddr);
59 static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
60 u16 cache_blk);
61 static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
62 u8 cache_blk, u16 flag);
63 static int FTL_Cache_Write(void);
64 static void FTL_Calculate_LRU(void);
65 static u32 FTL_Get_Block_Index(u32 wBlockNum);
67 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
68 u8 BT_Tag, u16 *Page);
69 static int FTL_Read_Block_Table(void);
70 static int FTL_Write_Block_Table(int wForce);
71 static int FTL_Write_Block_Table_Data(void);
72 static int FTL_Check_Block_Table(int wOldTable);
73 static int FTL_Static_Wear_Leveling(void);
74 static u32 FTL_Replace_Block_Table(void);
75 static int FTL_Write_IN_Progress_Block_Table_Page(void);
77 static u32 FTL_Get_Page_Num(u64 length);
78 static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
80 static u32 FTL_Replace_OneBlock(u32 wBlockNum,
81 u32 wReplaceNum);
82 static u32 FTL_Replace_LWBlock(u32 wBlockNum,
83 int *pGarbageCollect);
84 static u32 FTL_Replace_MWBlock(void);
85 static int FTL_Replace_Block(u64 blk_addr);
86 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
88 struct device_info_tag DeviceInfo;
89 struct flash_cache_tag Cache;
90 static struct spectra_l2_cache_info cache_l2;
92 static u8 *cache_l2_page_buf;
93 static u8 *cache_l2_blk_buf;
95 u8 *g_pBlockTable;
96 u8 *g_pWearCounter;
97 u16 *g_pReadCounter;
98 u32 *g_pBTBlocks;
99 static u16 g_wBlockTableOffset;
100 static u32 g_wBlockTableIndex;
101 static u8 g_cBlockTableStatus;
103 static u8 *g_pTempBuf;
104 static u8 *flag_check_blk_table;
105 static u8 *tmp_buf_search_bt_in_block;
106 static u8 *spare_buf_search_bt_in_block;
107 static u8 *spare_buf_bt_search_bt_in_block;
108 static u8 *tmp_buf1_read_blk_table;
109 static u8 *tmp_buf2_read_blk_table;
110 static u8 *flags_static_wear_leveling;
111 static u8 *tmp_buf_write_blk_table_data;
112 static u8 *tmp_buf_read_disturbance;
114 u8 *buf_read_page_main_spare;
115 u8 *buf_write_page_main_spare;
116 u8 *buf_read_page_spare;
117 u8 *buf_get_bad_block;
119 #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
120 struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
121 struct flash_cache_tag cache_start_copy;
122 #endif
124 int g_wNumFreeBlocks;
125 u8 g_SBDCmdIndex;
127 static u8 *g_pIPF;
128 static u8 bt_flag = FIRST_BT_ID;
129 static u8 bt_block_changed;
131 static u16 cache_block_to_write;
132 static u8 last_erased = FIRST_BT_ID;
134 static u8 GC_Called;
135 static u8 BT_GC_Called;
137 #if CMD_DMA
138 #define COPY_BACK_BUF_NUM 10
140 static u8 ftl_cmd_cnt; /* Init value is 0 */
141 u8 *g_pBTDelta;
142 u8 *g_pBTDelta_Free;
143 u8 *g_pBTStartingCopy;
144 u8 *g_pWearCounterCopy;
145 u16 *g_pReadCounterCopy;
146 u8 *g_pBlockTableCopies;
147 u8 *g_pNextBlockTable;
148 static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
149 static int cp_back_buf_idx;
151 static u8 *g_temp_buf;
153 #pragma pack(push, 1)
154 #pragma pack(1)
155 struct BTableChangesDelta {
156 u8 ftl_cmd_cnt;
157 u8 ValidFields;
158 u16 g_wBlockTableOffset;
159 u32 g_wBlockTableIndex;
160 u32 BT_Index;
161 u32 BT_Entry_Value;
162 u32 WC_Index;
163 u8 WC_Entry_Value;
164 u32 RC_Index;
165 u16 RC_Entry_Value;
168 #pragma pack(pop)
170 struct BTableChangesDelta *p_BTableChangesDelta;
171 #endif
174 #define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
175 #define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
177 #define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
178 sizeof(u32))
179 #define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
180 sizeof(u8))
181 #define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
182 sizeof(u16))
183 #if SUPPORT_LARGE_BLOCKNUM
184 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
185 sizeof(u8) * 3)
186 #else
187 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
188 sizeof(u16))
189 #endif
190 #define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
191 FTL_Get_WearCounter_Table_Mem_Size_Bytes
192 #define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
193 FTL_Get_ReadCounter_Table_Mem_Size_Bytes
/* Size, in bytes, of the on-flash block-table image: the LBA->PBA table,
 * plus one wear counter byte per data block, plus (MLC devices only) one
 * u16 read counter per data block, plus 4 trailing bytes (presumably the
 * block-table signature written by FTL_Insert_Block_Table_Signature —
 * TODO confirm; BTSIG_BYTES is not visible here).
 * NOTE(review): braces/blank lines were dropped by the extraction. */
195 static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
197 u32 byte_num;
199 if (DeviceInfo.MLCDevice) {
200 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
201 DeviceInfo.wDataBlockNum * sizeof(u8) +
202 DeviceInfo.wDataBlockNum * sizeof(u16);
203 } else {
204 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
205 DeviceInfo.wDataBlockNum * sizeof(u8);
208 byte_num += 4 * sizeof(u8);
210 return byte_num;
/* Same size as above, expressed in flash pages (rounded up by
 * FTL_Get_Page_Num). */
213 static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
215 return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
/* Serialize a window of the in-RAM block table into flashBuf for writing
 * to flash.  sizeToTx is the number of bytes to produce this call;
 * sizeTxed is how many bytes of the overall image earlier calls already
 * produced (so the image can be emitted one page at a time).  Layout
 * matches FTL_Get_Block_Table_Flash_Size_Bytes(): packed LBA->PBA
 * entries (3 bytes big-endian-ish per u32 entry when
 * SUPPORT_LARGE_BLOCKNUM, else 2), then wear counters, then (MLC only)
 * u16 read counters packed high-byte-first.  Returns the number of
 * bytes placed in flashBuf.
 * NOTE(review): closing braces of the loops/ifs were lost in this
 * extraction; code below is otherwise byte-identical to the snapshot. */
218 static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
219 u32 sizeTxed)
221 u32 wBytesCopied, blk_tbl_size, wBytes;
222 u32 *pbt = (u32 *)g_pBlockTable;
224 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
225 for (wBytes = 0;
226 (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
227 wBytes++) {
228 #if SUPPORT_LARGE_BLOCKNUM
/* byte k of entry k/3: shift 16/8/0 for k%3 == 0/1/2 */
229 flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
230 >> (((wBytes + sizeTxed) % 3) ?
231 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
232 #else
233 flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
234 >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
235 #endif
/* Rebase sizeTxed to the start of the wear-counter section */
238 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
239 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
240 wBytesCopied = wBytes;
241 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
242 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
243 memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
245 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
247 if (DeviceInfo.MLCDevice) {
248 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
249 wBytesCopied += wBytes;
250 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
251 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
252 flashBuf[wBytes + wBytesCopied] =
253 (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
254 (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
257 return wBytesCopied + wBytes;
/* Inverse of FTL_Copy_Block_Table_To_Flash: unpack sizeToTx bytes read
 * from flash (flashBuf) back into the in-RAM block table, wear counters
 * and (MLC only) read counters, resuming at overall offset sizeTxed.
 * Returns the number of bytes consumed.
 * NOTE(review): the read-counter branch zeroes the entry when
 * (offset % 2) is TRUE, whereas the pbt branches zero on offset % N ==
 * 0 — looks inconsistent (blob line 297); verify against upstream
 * before relying on MLC read-counter restore.  Braces were also lost
 * in this extraction. */
260 static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
261 u32 sizeToTx, u32 sizeTxed)
263 u32 wBytesCopied, blk_tbl_size, wBytes;
264 u32 *pbt = (u32 *)g_pBlockTable;
266 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
267 for (wBytes = 0; (wBytes < sizeToTx) &&
268 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
269 #if SUPPORT_LARGE_BLOCKNUM
/* First byte of a 3-byte entry: clear it before OR-ing bytes in */
270 if (!((wBytes + sizeTxed) % 3))
271 pbt[(wBytes + sizeTxed) / 3] = 0;
272 pbt[(wBytes + sizeTxed) / 3] |=
273 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
274 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
275 #else
276 if (!((wBytes + sizeTxed) % 2))
277 pbt[(wBytes + sizeTxed) / 2] = 0;
278 pbt[(wBytes + sizeTxed) / 2] |=
279 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
280 0 : 8));
281 #endif
284 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
285 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
286 wBytesCopied = wBytes;
287 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
288 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
289 memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
290 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
292 if (DeviceInfo.MLCDevice) {
293 wBytesCopied += wBytes;
294 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
295 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
296 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
297 if (((wBytes + sizeTxed) % 2))
298 g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
299 g_pReadCounter[(wBytes + sizeTxed) / 2] |=
300 (flashBuf[wBytes] <<
301 (((wBytes + sizeTxed) % 2) ? 0 : 8));
305 return wBytesCopied+wBytes;
/* Stamp the BTSIG_BYTES-byte block-table signature into buf at
 * BTSIG_OFFSET.  Each signature byte is the tag advanced by i*BTSIG_DELTA,
 * wrapped into the [FIRST_BT_ID, LAST_BT_ID] range, so a valid signature
 * is an arithmetic progression that FTL_Extract_Block_Table_Tag can
 * recognize.  Always returns PASS. */
308 static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
310 int i;
312 for (i = 0; i < BTSIG_BYTES; i++)
313 buf[BTSIG_OFFSET + i] =
314 ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
315 (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
317 return PASS;
/* Recover candidate block-table tags from the signature bytes in buf.
 * Scans pairs of signature bytes for a spacing that is a multiple of
 * BTSIG_DELTA (i.e. consistent with a signature written by
 * FTL_Insert_Block_Table_Signature), reverses the progression to get
 * the original tag, and records each distinct candidate in the static
 * tag[] array.  *tagarray is pointed at that static storage (NOT
 * reentrant).  Returns the number of candidate tags found.
 * NOTE(review): closing braces were lost in this extraction; the i/j
 * resynchronization at blob lines 346-347 is order-sensitive — do not
 * restructure without the complete original. */
320 static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
322 static u8 tag[BTSIG_BYTES >> 1];
323 int i, j, k, tagi, tagtemp, status;
325 *tagarray = (u8 *)tag;
326 tagi = 0;
328 for (i = 0; i < (BTSIG_BYTES - 1); i++) {
329 for (j = i + 1; (j < BTSIG_BYTES) &&
330 (tagi < (BTSIG_BYTES >> 1)); j++) {
331 tagtemp = buf[BTSIG_OFFSET + j] -
332 buf[BTSIG_OFFSET + i];
333 if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
334 tagtemp = (buf[BTSIG_OFFSET + i] +
335 (1 + LAST_BT_ID - FIRST_BT_ID) -
336 (i * BTSIG_DELTA)) %
337 (1 + LAST_BT_ID - FIRST_BT_ID);
/* Only record a tag the first time it is seen */
338 status = FAIL;
339 for (k = 0; k < tagi; k++) {
340 if (tagtemp == tag[k])
341 status = PASS;
344 if (status == FAIL) {
345 tag[tagi++] = tagtemp;
346 i = (j == (i + 1)) ? i + 1 : i;
347 j = (j == (i + 1)) ? i + 1 : i;
353 return tagi;
/* Post-SPL (boot-loader) recovery: walk every Spectra-managed block and
 * erase each entry marked SPARE (and not BAD) in the block table.  An
 * erase failure marks that table entry bad via MARK_BLOCK_AS_BAD and is
 * logged, but the scan continues.  Always returns PASS.
 * NOTE(review): closing braces were dropped by the extraction. */
357 static int FTL_Execute_SPL_Recovery(void)
359 u32 j, block, blks;
360 u32 *pbt = (u32 *)g_pBlockTable;
361 int ret;
363 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
364 __FILE__, __LINE__, __func__);
366 blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
367 for (j = 0; j <= blks; j++) {
368 block = (pbt[j]);
369 if (((block & BAD_BLOCK) != BAD_BLOCK) &&
370 ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
/* Strip the flag bits to get the physical block number */
371 ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
372 if (FAIL == ret) {
373 nand_dbg_print(NAND_DBG_WARN,
374 "NAND Program fail in %s, Line %d, "
375 "Function: %s, new Bad Block %d "
376 "generated!\n",
377 __FILE__, __LINE__, __func__,
378 (int)(block & ~BAD_BLOCK));
379 MARK_BLOCK_AS_BAD(pbt[j]);
384 return PASS;
387 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
388 * Function: GLOB_FTL_IdentifyDevice
389 * Inputs: pointer to identify data structure
390 * Outputs: PASS / FAIL
391 * Description: the identify data structure is filled in with
392 * information for the block driver.
393 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Fill the block driver's identify structure from DeviceInfo.
 * (The struct tag's "indentfy" misspelling is part of the public API
 * declared in the header — do not "fix" it here.)  Returns PASS. */
394 int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
396 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
397 __FILE__, __LINE__, __func__);
399 dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
400 dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
401 dev_data->PageDataSize = DeviceInfo.wPageDataSize;
402 dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
403 dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
405 return PASS;
408 /* ..... */
/* Allocate every FTL-layer buffer in one place.  Uses the classic
 * goto-unwind pattern: each allocation failure jumps to a label that
 * frees everything allocated before it, ending at block_table_fail.
 * Returns PASS on success, -ENOMEM on any failure.  Note the mixed
 * return convention (PASS vs. negative errno) — callers only compare
 * against PASS.  GFP_ATOMIC is used for most buffers; the CMD_DMA
 * descriptor tables use kzalloc(GFP_KERNEL) — presumably those paths
 * can sleep, TODO confirm.
 * NOTE(review): this extraction dropped blank lines and closing braces
 * (e.g. of the two for-loops); code is otherwise byte-identical. */
409 static int allocate_memory(void)
411 u32 block_table_size, page_size, block_size, mem_size;
412 u32 total_bytes = 0;
413 int i;
414 #if CMD_DMA
415 int j;
416 #endif
418 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
419 __FILE__, __LINE__, __func__);
421 page_size = DeviceInfo.wPageSize;
422 block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
/* Block table = u32 PBA + u8 wear + u16 read counter per data block,
 * rounded up to a whole number of data pages */
424 block_table_size = DeviceInfo.wDataBlockNum *
425 (sizeof(u32) + sizeof(u8) + sizeof(u16));
426 block_table_size += (DeviceInfo.wPageDataSize -
427 (block_table_size % DeviceInfo.wPageDataSize)) %
428 DeviceInfo.wPageDataSize;
430 /* Malloc memory for block tables */
431 g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
432 if (!g_pBlockTable)
433 goto block_table_fail;
434 memset(g_pBlockTable, 0, block_table_size);
435 total_bytes += block_table_size;
/* Wear/read counters live inside the same allocation, after the PBAs */
437 g_pWearCounter = (u8 *)(g_pBlockTable +
438 DeviceInfo.wDataBlockNum * sizeof(u32));
440 if (DeviceInfo.MLCDevice)
441 g_pReadCounter = (u16 *)(g_pBlockTable +
442 DeviceInfo.wDataBlockNum *
443 (sizeof(u32) + sizeof(u8)));
445 /* Malloc memory and init for cache items */
446 for (i = 0; i < CACHE_ITEM_NUM; i++) {
447 Cache.array[i].address = NAND_CACHE_INIT_ADDR;
448 Cache.array[i].use_cnt = 0;
449 Cache.array[i].changed = CLEAR;
450 Cache.array[i].buf = kmalloc(Cache.cache_item_size,
451 GFP_ATOMIC);
452 if (!Cache.array[i].buf)
453 goto cache_item_fail;
454 memset(Cache.array[i].buf, 0, Cache.cache_item_size);
455 total_bytes += Cache.cache_item_size;
458 /* Malloc memory for IPF */
459 g_pIPF = kmalloc(page_size, GFP_ATOMIC);
460 if (!g_pIPF)
461 goto ipf_fail;
462 memset(g_pIPF, 0, page_size);
463 total_bytes += page_size;
465 /* Malloc memory for data merging during Level2 Cache flush */
466 cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
467 if (!cache_l2_page_buf)
468 goto cache_l2_page_buf_fail;
469 memset(cache_l2_page_buf, 0xff, page_size);
470 total_bytes += page_size;
472 cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
473 if (!cache_l2_blk_buf)
474 goto cache_l2_blk_buf_fail;
475 memset(cache_l2_blk_buf, 0xff, block_size);
476 total_bytes += block_size;
478 /* Malloc memory for temp buffer */
479 g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
480 if (!g_pTempBuf)
481 goto Temp_buf_fail;
482 memset(g_pTempBuf, 0, Cache.cache_item_size);
483 total_bytes += Cache.cache_item_size;
485 /* Malloc memory for block table blocks */
486 mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
487 g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
488 if (!g_pBTBlocks)
489 goto bt_blocks_fail;
490 memset(g_pBTBlocks, 0xff, mem_size);
491 total_bytes += mem_size;
493 /* Malloc memory for function FTL_Check_Block_Table */
494 flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
495 if (!flag_check_blk_table)
496 goto flag_check_blk_table_fail;
497 total_bytes += DeviceInfo.wDataBlockNum;
499 /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
500 tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
501 if (!tmp_buf_search_bt_in_block)
502 goto tmp_buf_search_bt_in_block_fail;
503 memset(tmp_buf_search_bt_in_block, 0xff, page_size);
504 total_bytes += page_size;
/* Spare-area size = full page minus data area */
506 mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
507 spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
508 if (!spare_buf_search_bt_in_block)
509 goto spare_buf_search_bt_in_block_fail;
510 memset(spare_buf_search_bt_in_block, 0xff, mem_size);
511 total_bytes += mem_size;
513 spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
514 if (!spare_buf_bt_search_bt_in_block)
515 goto spare_buf_bt_search_bt_in_block_fail;
516 memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
517 total_bytes += mem_size;
519 /* Malloc memory for function FTL_Read_Block_Table */
520 tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
521 if (!tmp_buf1_read_blk_table)
522 goto tmp_buf1_read_blk_table_fail;
523 memset(tmp_buf1_read_blk_table, 0xff, page_size);
524 total_bytes += page_size;
526 tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
527 if (!tmp_buf2_read_blk_table)
528 goto tmp_buf2_read_blk_table_fail;
529 memset(tmp_buf2_read_blk_table, 0xff, page_size);
530 total_bytes += page_size;
532 /* Malloc memory for function FTL_Static_Wear_Leveling */
533 flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
534 GFP_ATOMIC);
535 if (!flags_static_wear_leveling)
536 goto flags_static_wear_leveling_fail;
537 total_bytes += DeviceInfo.wDataBlockNum;
539 /* Malloc memory for function FTL_Write_Block_Table_Data */
540 if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
541 mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
542 2 * DeviceInfo.wPageSize;
543 else
544 mem_size = DeviceInfo.wPageSize;
545 tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
546 if (!tmp_buf_write_blk_table_data)
547 goto tmp_buf_write_blk_table_data_fail;
548 memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
549 total_bytes += mem_size;
551 /* Malloc memory for function FTL_Read_Disturbance */
552 tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
553 if (!tmp_buf_read_disturbance)
554 goto tmp_buf_read_disturbance_fail;
555 memset(tmp_buf_read_disturbance, 0xff, block_size);
556 total_bytes += block_size;
558 /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
559 buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
560 if (!buf_read_page_main_spare)
561 goto buf_read_page_main_spare_fail;
562 total_bytes += DeviceInfo.wPageSize;
564 /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
565 buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
566 if (!buf_write_page_main_spare)
567 goto buf_write_page_main_spare_fail;
568 total_bytes += DeviceInfo.wPageSize;
570 /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
571 buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
572 if (!buf_read_page_spare)
573 goto buf_read_page_spare_fail;
574 memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
575 total_bytes += DeviceInfo.wPageSpareSize;
577 /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
578 buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
579 if (!buf_get_bad_block)
580 goto buf_get_bad_block_fail;
581 memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
582 total_bytes += DeviceInfo.wPageSpareSize;
584 #if CMD_DMA
585 g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
586 if (!g_temp_buf)
587 goto temp_buf_fail;
588 memset(g_temp_buf, 0xff, block_size);
589 total_bytes += block_size;
591 /* Malloc memory for copy of block table used in CDMA mode */
592 g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
593 if (!g_pBTStartingCopy)
594 goto bt_starting_copy;
595 memset(g_pBTStartingCopy, 0, block_table_size);
596 total_bytes += block_table_size;
/* Same interior-pointer layout as g_pBlockTable above */
598 g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
599 DeviceInfo.wDataBlockNum * sizeof(u32));
601 if (DeviceInfo.MLCDevice)
602 g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
603 DeviceInfo.wDataBlockNum *
604 (sizeof(u32) + sizeof(u8)));
606 /* Malloc memory for block table copies */
607 mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
608 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
609 if (DeviceInfo.MLCDevice)
610 mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
611 g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
612 if (!g_pBlockTableCopies)
613 goto blk_table_copies_fail;
614 memset(g_pBlockTableCopies, 0, mem_size);
615 total_bytes += mem_size;
616 g_pNextBlockTable = g_pBlockTableCopies;
618 /* Malloc memory for Block Table Delta */
619 mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
620 g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
621 if (!g_pBTDelta)
622 goto bt_delta_fail;
623 memset(g_pBTDelta, 0, mem_size);
624 total_bytes += mem_size;
625 g_pBTDelta_Free = g_pBTDelta;
627 /* Malloc memory for Copy Back Buffers */
628 for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
629 cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
630 if (!cp_back_buf_copies[j])
631 goto cp_back_buf_copies_fail;
632 memset(cp_back_buf_copies[j], 0, block_size);
633 total_bytes += block_size;
635 cp_back_buf_idx = 0;
637 /* Malloc memory for pending commands list */
638 mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
639 info.pcmds = kzalloc(mem_size, GFP_KERNEL);
640 if (!info.pcmds)
641 goto pending_cmds_buf_fail;
642 total_bytes += mem_size;
644 /* Malloc memory for CDMA descripter table */
645 mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
646 info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
647 if (!info.cdma_desc_buf)
648 goto cdma_desc_buf_fail;
649 total_bytes += mem_size;
651 /* Malloc memory for Memcpy descripter table */
652 mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
653 info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
654 if (!info.memcp_desc_buf)
655 goto memcp_desc_buf_fail;
656 total_bytes += mem_size;
657 #endif
659 nand_dbg_print(NAND_DBG_WARN,
660 "Total memory allocated in FTL layer: %d\n", total_bytes);
662 return PASS;
/* Error unwind: labels run in reverse allocation order, each freeing
 * the buffers allocated before the failing step. */
664 #if CMD_DMA
665 memcp_desc_buf_fail:
666 kfree(info.cdma_desc_buf);
667 cdma_desc_buf_fail:
668 kfree(info.pcmds);
669 pending_cmds_buf_fail:
670 cp_back_buf_copies_fail:
671 j--;
672 for (; j >= 0; j--)
673 kfree(cp_back_buf_copies[j]);
674 kfree(g_pBTDelta);
675 bt_delta_fail:
676 kfree(g_pBlockTableCopies);
677 blk_table_copies_fail:
678 kfree(g_pBTStartingCopy);
679 bt_starting_copy:
680 kfree(g_temp_buf);
681 temp_buf_fail:
682 kfree(buf_get_bad_block);
683 #endif
685 buf_get_bad_block_fail:
686 kfree(buf_read_page_spare);
687 buf_read_page_spare_fail:
688 kfree(buf_write_page_main_spare);
689 buf_write_page_main_spare_fail:
690 kfree(buf_read_page_main_spare);
691 buf_read_page_main_spare_fail:
692 kfree(tmp_buf_read_disturbance);
693 tmp_buf_read_disturbance_fail:
694 kfree(tmp_buf_write_blk_table_data);
695 tmp_buf_write_blk_table_data_fail:
696 kfree(flags_static_wear_leveling);
697 flags_static_wear_leveling_fail:
698 kfree(tmp_buf2_read_blk_table);
699 tmp_buf2_read_blk_table_fail:
700 kfree(tmp_buf1_read_blk_table);
701 tmp_buf1_read_blk_table_fail:
702 kfree(spare_buf_bt_search_bt_in_block);
703 spare_buf_bt_search_bt_in_block_fail:
704 kfree(spare_buf_search_bt_in_block);
705 spare_buf_search_bt_in_block_fail:
706 kfree(tmp_buf_search_bt_in_block);
707 tmp_buf_search_bt_in_block_fail:
708 kfree(flag_check_blk_table);
709 flag_check_blk_table_fail:
710 kfree(g_pBTBlocks);
711 bt_blocks_fail:
712 kfree(g_pTempBuf);
713 Temp_buf_fail:
714 kfree(cache_l2_blk_buf);
715 cache_l2_blk_buf_fail:
716 kfree(cache_l2_page_buf);
717 cache_l2_page_buf_fail:
718 kfree(g_pIPF);
719 ipf_fail:
720 cache_item_fail:
721 i--;
722 for (; i >= 0; i--)
723 kfree(Cache.array[i].buf);
724 kfree(g_pBlockTable);
725 block_table_fail:
726 printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
727 __FILE__, __LINE__);
729 return -ENOMEM;
732 /* .... */
/* Free every buffer acquired by allocate_memory(), in reverse
 * allocation order.  g_pWearCounter/g_pReadCounter are interior
 * pointers into g_pBlockTable (see allocate_memory) and are therefore
 * deliberately not kfree'd separately.  Always returns 0. */
733 static int free_memory(void)
735 int i;
737 #if CMD_DMA
738 kfree(info.memcp_desc_buf);
739 kfree(info.cdma_desc_buf);
740 kfree(info.pcmds);
741 for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
742 kfree(cp_back_buf_copies[i]);
743 kfree(g_pBTDelta);
744 kfree(g_pBlockTableCopies);
745 kfree(g_pBTStartingCopy);
746 kfree(g_temp_buf);
747 kfree(buf_get_bad_block);
748 #endif
749 kfree(buf_read_page_spare);
750 kfree(buf_write_page_main_spare);
751 kfree(buf_read_page_main_spare);
752 kfree(tmp_buf_read_disturbance);
753 kfree(tmp_buf_write_blk_table_data);
754 kfree(flags_static_wear_leveling);
755 kfree(tmp_buf2_read_blk_table);
756 kfree(tmp_buf1_read_blk_table);
757 kfree(spare_buf_bt_search_bt_in_block);
758 kfree(spare_buf_search_bt_in_block);
759 kfree(tmp_buf_search_bt_in_block);
760 kfree(flag_check_blk_table);
761 kfree(g_pBTBlocks);
762 kfree(g_pTempBuf);
763 kfree(g_pIPF);
764 for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
765 kfree(Cache.array[i].buf);
766 kfree(g_pBlockTable);
768 return 0;
/* Debug dump of the Level-2 cache mapping list: for every node, print
 * its logical block number and each valid pages_array entry.
 * NOTE(review): the loop variable `i` has no visible declaration in
 * this snapshot — its declaration (and several braces) were almost
 * certainly dropped by the extraction; verify against the upstream
 * file before editing. */
771 static void dump_cache_l2_table(void)
773 struct list_head *p;
774 struct spectra_l2_cache_list *pnd;
775 int n;
777 n = 0;
778 list_for_each(p, &cache_l2.table.list) {
779 pnd = list_entry(p, struct spectra_l2_cache_list, list);
780 nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
782 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
783 if (pnd->pages_array[i] != MAX_U32_VALUE)
784 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
787 n++;
791 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
792 * Function: GLOB_FTL_Init
793 * Inputs: none
794 * Outputs: PASS=0 / FAIL=1
795 * Description: allocates the memory for cache array,
796 * important data structures
797 * clears the cache array
798 * reads the block table from flash into array
799 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
800 int GLOB_FTL_Init(void)
802 int i;
804 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
805 __FILE__, __LINE__, __func__);
/* One data page per cache item */
807 Cache.pages_per_item = 1;
808 Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
810 if (allocate_memory() != PASS)
811 return FAIL;
813 #if CMD_DMA
814 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
/* Snapshot the cache state so it can be rolled back on chain failure */
815 memcpy((void *)&cache_start_copy, (void *)&Cache,
816 sizeof(struct flash_cache_tag));
817 memset((void *)&int_cache, -1,
818 sizeof(struct flash_cache_delta_list_tag) *
819 (MAX_CHANS + MAX_DESCS));
820 #endif
821 ftl_cmd_cnt = 0;
822 #endif
824 if (FTL_Read_Block_Table() != PASS)
825 return FAIL;
827 /* Init the Level2 Cache data structure */
828 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
829 cache_l2.blk_array[i] = MAX_U32_VALUE;
830 cache_l2.cur_blk_idx = 0;
831 cache_l2.cur_page_num = 0;
832 INIT_LIST_HEAD(&cache_l2.table.list);
833 cache_l2.table.logical_blk_num = MAX_U32_VALUE;
835 dump_cache_l2_table();
/* Returns 0 on success; FAIL on allocation or block-table read error */
837 return 0;
841 #if CMD_DMA
/* Stub: per-command failure processing for the CDMA path is not
 * implemented; this only logs that it was (unexpectedly) reached.
 * Parameters are accepted so GLOB_FTL_Event_Status can call it. */
843 static void process_cmd(int *first_failed_cmd, u16 idx, int event)
845 printk(KERN_ERR "temporary workaround function. "
846 "Should not be called! \n");
849 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
850 * Function: GLOB_FTL_Event_Status
851 * Inputs: none
852 * Outputs: Event Code
853 * Description: It is called by SBD after hardware interrupt signalling
854 * completion of commands chain
855 * It does following things
856 * get event status from LLD
857 * analyze command chain status
858 * determine last command executed
859 * analyze results
860 * rebuild the block table in case of uncorrectable error
861 * return event code
862 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
863 int GLOB_FTL_Event_Status(int *first_failed_cmd)
865 int event_code = PASS;
866 u16 i_P;
868 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
869 __FILE__, __LINE__, __func__);
871 *first_failed_cmd = 0;
873 event_code = GLOB_LLD_Event_Status();
875 switch (event_code) {
876 case EVENT_PASS:
877 nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
878 break;
879 case EVENT_UNCORRECTABLE_DATA_ERROR:
880 nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
881 break;
882 case EVENT_PROGRAM_FAILURE:
883 case EVENT_ERASE_FAILURE:
884 nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
885 "Event code: 0x%x\n", event_code);
/* Replay the per-command delta list, then roll the live block
 * table back to the pre-chain snapshot copies */
886 p_BTableChangesDelta =
887 (struct BTableChangesDelta *)g_pBTDelta;
888 for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
889 i_P++)
890 process_cmd(first_failed_cmd, i_P, event_code);
891 memcpy(g_pBlockTable, g_pBTStartingCopy,
892 DeviceInfo.wDataBlockNum * sizeof(u32));
893 memcpy(g_pWearCounter, g_pWearCounterCopy,
894 DeviceInfo.wDataBlockNum * sizeof(u8));
895 if (DeviceInfo.MLCDevice)
896 memcpy(g_pReadCounter, g_pReadCounterCopy,
897 DeviceInfo.wDataBlockNum * sizeof(u16));
899 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
/* Restore the cache snapshot taken before the chain ran */
900 memcpy((void *)&Cache, (void *)&cache_start_copy,
901 sizeof(struct flash_cache_tag));
902 memset((void *)&int_cache, -1,
903 sizeof(struct flash_cache_delta_list_tag) *
904 (MAX_DESCS + MAX_CHANS));
905 #endif
906 break;
907 default:
908 nand_dbg_print(NAND_DBG_WARN,
909 "Handling unexpected event code - 0x%x\n",
910 event_code);
911 event_code = ERR;
912 break;
/* Re-snapshot current state as the baseline for the next chain */
915 memcpy(g_pBTStartingCopy, g_pBlockTable,
916 DeviceInfo.wDataBlockNum * sizeof(u32));
917 memcpy(g_pWearCounterCopy, g_pWearCounter,
918 DeviceInfo.wDataBlockNum * sizeof(u8));
919 if (DeviceInfo.MLCDevice)
920 memcpy(g_pReadCounterCopy, g_pReadCounter,
921 DeviceInfo.wDataBlockNum * sizeof(u16));
923 g_pBTDelta_Free = g_pBTDelta;
924 ftl_cmd_cnt = 0;
925 g_pNextBlockTable = g_pBlockTableCopies;
926 cp_back_buf_idx = 0;
928 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
929 memcpy((void *)&cache_start_copy, (void *)&Cache,
930 sizeof(struct flash_cache_tag));
931 memset((void *)&int_cache, -1,
932 sizeof(struct flash_cache_delta_list_tag) *
933 (MAX_DESCS + MAX_CHANS));
934 #endif
936 return event_code;
939 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
940 * Function: glob_ftl_execute_cmds
941 * Inputs: none
942 * Outputs: none
943 * Description: pass thru to LLD
944 ***************************************************************/
/* Pass-through to the LLD command-chain executor; resets the SBD
 * command index first.  Returns whatever glob_lld_execute_cmds()
 * returns. */
945 u16 glob_ftl_execute_cmds(void)
947 nand_dbg_print(NAND_DBG_TRACE,
948 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
949 (unsigned int)ftl_cmd_cnt);
950 g_SBDCmdIndex = 0;
951 return glob_lld_execute_cmds();
954 #endif
956 #if !CMD_DMA
957 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
958 * Function: GLOB_FTL_Read Immediate
959 * Inputs: pointer to data
960 * address of data
961 * Outputs: PASS / FAIL
962 * Description: Reads one page of data into RAM directly from flash without
963 * using or disturbing cache.It is assumed this function is called
964 * with CMD-DMA disabled.
965 *****************************************************************/
966 int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
968 int wResult = FAIL;
969 u32 Block;
970 u16 Page;
971 u32 phy_blk;
972 u32 *pbt = (u32 *)g_pBlockTable;
974 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
975 __FILE__, __LINE__, __func__);
977 Block = BLK_FROM_ADDR(addr);
978 Page = PAGE_FROM_ADDR(addr, Block);
/* Only spare (not-yet-mapped) blocks may be read directly; anything
 * else must go through the cache path */
980 if (!IS_SPARE_BLOCK(Block))
981 return FAIL;
983 phy_blk = pbt[Block];
984 wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
/* MLC: bump the per-block read counter and trigger read-disturbance
 * relocation once it crosses MAX_READ_COUNTER */
986 if (DeviceInfo.MLCDevice) {
987 g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
988 if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
989 >= MAX_READ_COUNTER)
990 FTL_Read_Disturbance(phy_blk);
991 if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
992 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
993 FTL_Write_IN_Progress_Block_Table_Page();
997 return wResult;
999 #endif
1001 #ifdef SUPPORT_BIG_ENDIAN
1002 /*********************************************************************
1003 * Function: FTL_Invert_Block_Table
1004 * Inputs: none
1005 * Outputs: none
1006 * Description: Re-format the block table in ram based on BIG_ENDIAN and
1007 * LARGE_BLOCKNUM if necessary
1008 **********************************************************************/
1009 static void FTL_Invert_Block_Table(void)
1011 u32 i;
1012 u32 *pbt = (u32 *)g_pBlockTable;
1014 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1015 __FILE__, __LINE__, __func__);
1017 #ifdef SUPPORT_LARGE_BLOCKNUM
/* NOTE(review): INVERTUINT32 applied to g_pWearCounter[i], which is a
 * u8 — looks like byte-swapping a single byte; verify upstream intent
 * before touching.  (Closing braces were lost in this extraction.) */
1018 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1019 pbt[i] = INVERTUINT32(pbt[i]);
1020 g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
1022 #else
1023 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1024 pbt[i] = INVERTUINT16(pbt[i]);
1025 g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
1027 #endif
1029 #endif
1031 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1032 * Function: GLOB_FTL_Flash_Init
1033 * Inputs: none
1034 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1035 * Description: The flash controller is initialized
1036 * The flash device is reset
1037 * Perform a flash READ ID command to confirm that a
1038 * valid device is attached and active.
1039 * The DeviceInfo structure gets filled in
1040 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1041 int GLOB_FTL_Flash_Init(void)
1043 int status = FAIL;
1045 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1046 __FILE__, __LINE__, __func__);
1048 g_SBDCmdIndex = 0;
1050 GLOB_LLD_Flash_Init();
1052 status = GLOB_LLD_Read_Device_ID();
1054 return status;
1057 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1058 * Inputs: none
1059 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1060 * Description: The flash controller is released
1061 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1062 int GLOB_FTL_Flash_Release(void)
1064 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1065 __FILE__, __LINE__, __func__);
1067 return GLOB_LLD_Flash_Release();
1071 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1072 * Function: GLOB_FTL_Cache_Release
1073 * Inputs: none
1074 * Outputs: none
1075 * Description: release all allocated memory in GLOB_FTL_Init
1076 * (allocated in GLOB_FTL_Init)
1077 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1078 void GLOB_FTL_Cache_Release(void)
1080 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1081 __FILE__, __LINE__, __func__);
1083 free_memory();
1086 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1087 * Function: FTL_Cache_If_Hit
1088 * Inputs: Page Address
1089 * Outputs: Block number/UNHIT BLOCK
1090 * Description: Determines if the addressed page is in cache
1091 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1092 static u16 FTL_Cache_If_Hit(u64 page_addr)
1094 u16 item;
1095 u64 addr;
1096 int i;
1098 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1099 __FILE__, __LINE__, __func__);
1101 item = UNHIT_CACHE_ITEM;
1102 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1103 addr = Cache.array[i].address;
1104 if ((page_addr >= addr) &&
1105 (page_addr < (addr + Cache.cache_item_size))) {
1106 item = i;
1107 break;
1111 return item;
1114 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1115 * Function: FTL_Calculate_LRU
1116 * Inputs: None
1117 * Outputs: None
1118 * Description: Calculate the least recently block in a cache and record its
1119 * index in LRU field.
1120 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1121 static void FTL_Calculate_LRU(void)
1123 u16 i, bCurrentLRU, bTempCount;
1125 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1126 __FILE__, __LINE__, __func__);
1128 bCurrentLRU = 0;
1129 bTempCount = MAX_WORD_VALUE;
1131 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1132 if (Cache.array[i].use_cnt < bTempCount) {
1133 bCurrentLRU = i;
1134 bTempCount = Cache.array[i].use_cnt;
1138 Cache.LRU = bCurrentLRU;
1141 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1142 * Function: FTL_Cache_Read_Page
1143 * Inputs: pointer to read buffer, logical address and cache item number
1144 * Outputs: None
1145 * Description: Read the page from the cached block addressed by blocknumber
1146 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1147 static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1149 u8 *start_addr;
1151 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1152 __FILE__, __LINE__, __func__);
1154 start_addr = Cache.array[cache_item].buf;
1155 start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1156 DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1158 #if CMD_DMA
1159 GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1160 DeviceInfo.wPageDataSize, 0);
1161 ftl_cmd_cnt++;
1162 #else
1163 memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1164 #endif
1166 if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1167 Cache.array[cache_item].use_cnt++;
1170 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1171 * Function: FTL_Cache_Read_All
1172 * Inputs: pointer to read buffer,block address
1173 * Outputs: PASS=0 / FAIL =1
1174 * Description: It reads pages in cache
1175 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1176 static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
1178 int wResult = PASS;
1179 u32 Block;
1180 u32 lba;
1181 u16 Page;
1182 u16 PageCount;
1183 u32 *pbt = (u32 *)g_pBlockTable;
1184 u32 i;
1186 Block = BLK_FROM_ADDR(phy_addr);
1187 Page = PAGE_FROM_ADDR(phy_addr, Block);
1188 PageCount = Cache.pages_per_item;
1190 nand_dbg_print(NAND_DBG_DEBUG,
1191 "%s, Line %d, Function: %s, Block: 0x%x\n",
1192 __FILE__, __LINE__, __func__, Block);
1194 lba = 0xffffffff;
1195 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1196 if ((pbt[i] & (~BAD_BLOCK)) == Block) {
1197 lba = i;
1198 if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
1199 IS_DISCARDED_BLOCK(i)) {
1200 /* Add by yunpeng -2008.12.3 */
1201 #if CMD_DMA
1202 GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
1203 PageCount * DeviceInfo.wPageDataSize, 0);
1204 ftl_cmd_cnt++;
1205 #else
1206 memset(pData, 0xFF,
1207 PageCount * DeviceInfo.wPageDataSize);
1208 #endif
1209 return wResult;
1210 } else {
1211 continue; /* break ?? */
1216 if (0xffffffff == lba)
1217 printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
1219 #if CMD_DMA
1220 wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
1221 PageCount, LLD_CMD_FLAG_MODE_CDMA);
1222 if (DeviceInfo.MLCDevice) {
1223 g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1224 nand_dbg_print(NAND_DBG_DEBUG,
1225 "Read Counter modified in ftl_cmd_cnt %u"
1226 " Block %u Counter%u\n",
1227 ftl_cmd_cnt, (unsigned int)Block,
1228 g_pReadCounter[Block -
1229 DeviceInfo.wSpectraStartBlock]);
1231 p_BTableChangesDelta =
1232 (struct BTableChangesDelta *)g_pBTDelta_Free;
1233 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
1234 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
1235 p_BTableChangesDelta->RC_Index =
1236 Block - DeviceInfo.wSpectraStartBlock;
1237 p_BTableChangesDelta->RC_Entry_Value =
1238 g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
1239 p_BTableChangesDelta->ValidFields = 0xC0;
1241 ftl_cmd_cnt++;
1243 if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1244 MAX_READ_COUNTER)
1245 FTL_Read_Disturbance(Block);
1246 if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1247 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1248 FTL_Write_IN_Progress_Block_Table_Page();
1250 } else {
1251 ftl_cmd_cnt++;
1253 #else
1254 wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
1255 if (wResult == FAIL)
1256 return wResult;
1258 if (DeviceInfo.MLCDevice) {
1259 g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1260 if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1261 MAX_READ_COUNTER)
1262 FTL_Read_Disturbance(Block);
1263 if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1264 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1265 FTL_Write_IN_Progress_Block_Table_Page();
1268 #endif
1269 return wResult;
1272 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1273 * Function: FTL_Cache_Write_All
1274 * Inputs: pointer to cache in sys memory
1275 * address of free block in flash
1276 * Outputs: PASS=0 / FAIL=1
1277 * Description: writes all the pages of the block in cache to flash
1279 * NOTE:need to make sure this works ok when cache is limited
1280 * to a partial block. This is where copy-back would be
1281 * activated. This would require knowing which pages in the
1282 * cached block are clean/dirty.Right now we only know if
1283 * the whole block is clean/dirty.
1284 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1285 static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
1287 u16 wResult = PASS;
1288 u32 Block;
1289 u16 Page;
1290 u16 PageCount;
1292 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1293 __FILE__, __LINE__, __func__);
1295 nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
1296 "on %d\n", cache_block_to_write,
1297 (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
1299 Block = BLK_FROM_ADDR(blk_addr);
1300 Page = PAGE_FROM_ADDR(blk_addr, Block);
1301 PageCount = Cache.pages_per_item;
1303 #if CMD_DMA
1304 if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
1305 Block, Page, PageCount)) {
1306 nand_dbg_print(NAND_DBG_WARN,
1307 "NAND Program fail in %s, Line %d, "
1308 "Function: %s, new Bad Block %d generated! "
1309 "Need Bad Block replacing.\n",
1310 __FILE__, __LINE__, __func__, Block);
1311 wResult = FAIL;
1313 ftl_cmd_cnt++;
1314 #else
1315 if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
1316 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
1317 " Line %d, Function %s, new Bad Block %d generated!"
1318 "Need Bad Block replacing.\n",
1319 __FILE__, __LINE__, __func__, Block);
1320 wResult = FAIL;
1322 #endif
1323 return wResult;
1326 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1327 * Function: FTL_Copy_Block
1328 * Inputs: source block address
1329 * Destination block address
1330 * Outputs: PASS=0 / FAIL=1
1331 * Description: used only for static wear leveling to move the block
1332 * containing static data to new blocks(more worn)
1333 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1334 int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
1336 int i, r1, r2, wResult = PASS;
1338 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1339 __FILE__, __LINE__, __func__);
1341 for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1342 r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
1343 i * DeviceInfo.wPageDataSize);
1344 r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
1345 i * DeviceInfo.wPageDataSize);
1346 if ((ERR == r1) || (FAIL == r2)) {
1347 wResult = FAIL;
1348 break;
1352 return wResult;
1355 /* Search the block table to find out the least wear block and then return it */
1356 static u32 find_least_worn_blk_for_l2_cache(void)
1358 int i;
1359 u32 *pbt = (u32 *)g_pBlockTable;
1360 u8 least_wear_cnt = MAX_BYTE_VALUE;
1361 u32 least_wear_blk_idx = MAX_U32_VALUE;
1362 u32 phy_idx;
1364 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1365 if (IS_SPARE_BLOCK(i)) {
1366 phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1367 if (phy_idx > DeviceInfo.wSpectraEndBlock)
1368 printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1369 "Too big phy block num (%d)\n", phy_idx);
1370 if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1371 least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1372 least_wear_blk_idx = i;
1377 nand_dbg_print(NAND_DBG_WARN,
1378 "find_least_worn_blk_for_l2_cache: "
1379 "find block %d with least worn counter (%d)\n",
1380 least_wear_blk_idx, least_wear_cnt);
1382 return least_wear_blk_idx;
1387 /* Get blocks for Level2 Cache */
1388 static int get_l2_cache_blks(void)
1390 int n;
1391 u32 blk;
1392 u32 *pbt = (u32 *)g_pBlockTable;
1394 for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1395 blk = find_least_worn_blk_for_l2_cache();
1396 if (blk > DeviceInfo.wDataBlockNum) {
1397 nand_dbg_print(NAND_DBG_WARN,
1398 "find_least_worn_blk_for_l2_cache: "
1399 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1400 return FAIL;
1402 /* Tag the free block as discard in block table */
1403 pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1404 /* Add the free block to the L2 Cache block array */
1405 cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1408 return PASS;
1411 static int erase_l2_cache_blocks(void)
1413 int i, ret = PASS;
1414 u32 pblk, lblk = BAD_BLOCK;
1415 u64 addr;
1416 u32 *pbt = (u32 *)g_pBlockTable;
1418 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1419 __FILE__, __LINE__, __func__);
1421 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1422 pblk = cache_l2.blk_array[i];
1424 /* If the L2 cache block is invalid, then just skip it */
1425 if (MAX_U32_VALUE == pblk)
1426 continue;
1428 BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1430 addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1431 if (PASS == GLOB_FTL_Block_Erase(addr)) {
1432 /* Get logical block number of the erased block */
1433 lblk = FTL_Get_Block_Index(pblk);
1434 BUG_ON(BAD_BLOCK == lblk);
1435 /* Tag it as free in the block table */
1436 pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1437 pbt[lblk] |= (u32)(SPARE_BLOCK);
1438 } else {
1439 MARK_BLOCK_AS_BAD(pbt[lblk]);
1440 ret = ERR;
1444 return ret;
1448 * Merge the valid data page in the L2 cache blocks into NAND.
1450 static int flush_l2_cache(void)
1452 struct list_head *p;
1453 struct spectra_l2_cache_list *pnd, *tmp_pnd;
1454 u32 *pbt = (u32 *)g_pBlockTable;
1455 u32 phy_blk, l2_blk;
1456 u64 addr;
1457 u16 l2_page;
1458 int i, ret = PASS;
1460 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1461 __FILE__, __LINE__, __func__);
1463 if (list_empty(&cache_l2.table.list)) /* No data to flush */
1464 return ret;
1466 //dump_cache_l2_table();
1468 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
1469 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1470 FTL_Write_IN_Progress_Block_Table_Page();
1473 list_for_each(p, &cache_l2.table.list) {
1474 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1475 if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
1476 IS_BAD_BLOCK(pnd->logical_blk_num) ||
1477 IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
1478 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1479 memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
1480 } else {
1481 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1482 phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1483 ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
1484 phy_blk, 0, DeviceInfo.wPagesPerBlock);
1485 if (ret == FAIL) {
1486 printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1490 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
1491 if (pnd->pages_array[i] != MAX_U32_VALUE) {
1492 l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
1493 l2_page = pnd->pages_array[i] & 0xffff;
1494 ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
1495 if (ret == FAIL) {
1496 printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1498 memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
1502 /* Find a free block and tag the original block as discarded */
1503 addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
1504 ret = FTL_Replace_Block(addr);
1505 if (ret == FAIL) {
1506 printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
1509 /* Write back the updated data into NAND */
1510 phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1511 if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1512 nand_dbg_print(NAND_DBG_WARN,
1513 "Program NAND block %d fail in %s, Line %d\n",
1514 phy_blk, __FILE__, __LINE__);
1515 /* This may not be really a bad block. So just tag it as discarded. */
1516 /* Then it has a chance to be erased when garbage collection. */
1517 /* If it is really bad, then the erase will fail and it will be marked */
1518 /* as bad then. Otherwise it will be marked as free and can be used again */
1519 MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
1520 /* Find another free block and write it again */
1521 FTL_Replace_Block(addr);
1522 phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1523 if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1524 printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
1525 "Some data will be lost!\n", phy_blk);
1526 MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
1528 } else {
1529 /* tag the new free block as used block */
1530 pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
1534 /* Destroy the L2 Cache table and free the memory of all nodes */
1535 list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
1536 list_del(&pnd->list);
1537 kfree(pnd);
1540 /* Erase discard L2 cache blocks */
1541 if (erase_l2_cache_blocks() != PASS)
1542 nand_dbg_print(NAND_DBG_WARN,
1543 " Erase L2 cache blocks error in %s, Line %d\n",
1544 __FILE__, __LINE__);
1546 /* Init the Level2 Cache data structure */
1547 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
1548 cache_l2.blk_array[i] = MAX_U32_VALUE;
1549 cache_l2.cur_blk_idx = 0;
1550 cache_l2.cur_page_num = 0;
1551 INIT_LIST_HEAD(&cache_l2.table.list);
1552 cache_l2.table.logical_blk_num = MAX_U32_VALUE;
1554 return ret;
1558 * Write back a changed victim cache item to the Level2 Cache
1559 * and update the L2 Cache table to map the change.
1560 * If the L2 Cache is full, then start to do the L2 Cache flush.
1562 static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1564 u32 logical_blk_num;
1565 u16 logical_page_num;
1566 struct list_head *p;
1567 struct spectra_l2_cache_list *pnd, *pnd_new;
1568 u32 node_size;
1569 int i, found;
1571 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1572 __FILE__, __LINE__, __func__);
1575 * If Level2 Cache table is empty, then it means either:
1576 * 1. This is the first time that the function called after FTL_init
1577 * or
1578 * 2. The Level2 Cache has just been flushed
1580 * So, 'steal' some free blocks from NAND for L2 Cache using
1581 * by just mask them as discard in the block table
1583 if (list_empty(&cache_l2.table.list)) {
1584 BUG_ON(cache_l2.cur_blk_idx != 0);
1585 BUG_ON(cache_l2.cur_page_num!= 0);
1586 BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1587 if (FAIL == get_l2_cache_blks()) {
1588 GLOB_FTL_Garbage_Collection();
1589 if (FAIL == get_l2_cache_blks()) {
1590 printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1591 return FAIL;
1596 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1597 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1598 BUG_ON(logical_blk_num == MAX_U32_VALUE);
1600 /* Write the cache item data into the current position of L2 Cache */
1601 #if CMD_DMA
1603 * TODO
1605 #else
1606 if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1607 cache_l2.blk_array[cache_l2.cur_blk_idx],
1608 cache_l2.cur_page_num, 1)) {
1609 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1610 "%s, Line %d, new Bad Block %d generated!\n",
1611 __FILE__, __LINE__,
1612 cache_l2.blk_array[cache_l2.cur_blk_idx]);
1614 /* TODO: tag the current block as bad and try again */
1616 return FAIL;
1618 #endif
1621 * Update the L2 Cache table.
1623 * First seaching in the table to see whether the logical block
1624 * has been mapped. If not, then kmalloc a new node for the
1625 * logical block, fill data, and then insert it to the list.
1626 * Otherwise, just update the mapped node directly.
1628 found = 0;
1629 list_for_each(p, &cache_l2.table.list) {
1630 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1631 if (pnd->logical_blk_num == logical_blk_num) {
1632 pnd->pages_array[logical_page_num] =
1633 (cache_l2.cur_blk_idx << 16) |
1634 cache_l2.cur_page_num;
1635 found = 1;
1636 break;
1639 if (!found) { /* Create new node for the logical block here */
1641 /* The logical pages to physical pages map array is
1642 * located at the end of struct spectra_l2_cache_list.
1644 node_size = sizeof(struct spectra_l2_cache_list) +
1645 sizeof(u32) * DeviceInfo.wPagesPerBlock;
1646 pnd_new = kmalloc(node_size, GFP_ATOMIC);
1647 if (!pnd_new) {
1648 printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1649 __FILE__, __LINE__);
1651 * TODO: Need to flush all the L2 cache into NAND ASAP
1652 * since no memory available here
1655 pnd_new->logical_blk_num = logical_blk_num;
1656 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1657 pnd_new->pages_array[i] = MAX_U32_VALUE;
1658 pnd_new->pages_array[logical_page_num] =
1659 (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1660 list_add(&pnd_new->list, &cache_l2.table.list);
1663 /* Increasing the current position pointer of the L2 Cache */
1664 cache_l2.cur_page_num++;
1665 if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1666 cache_l2.cur_blk_idx++;
1667 if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1668 /* The L2 Cache is full. Need to flush it now */
1669 nand_dbg_print(NAND_DBG_WARN,
1670 "L2 Cache is full, will start to flush it\n");
1671 flush_l2_cache();
1672 } else {
1673 cache_l2.cur_page_num = 0;
1677 return PASS;
1681 * Seach in the Level2 Cache table to find the cache item.
1682 * If find, read the data from the NAND page of L2 Cache,
1683 * Otherwise, return FAIL.
1685 static int search_l2_cache(u8 *buf, u64 logical_addr)
1687 u32 logical_blk_num;
1688 u16 logical_page_num;
1689 struct list_head *p;
1690 struct spectra_l2_cache_list *pnd;
1691 u32 tmp = MAX_U32_VALUE;
1692 u32 phy_blk;
1693 u16 phy_page;
1694 int ret = FAIL;
1696 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1697 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1699 list_for_each(p, &cache_l2.table.list) {
1700 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1701 if (pnd->logical_blk_num == logical_blk_num) {
1702 tmp = pnd->pages_array[logical_page_num];
1703 break;
1707 if (tmp != MAX_U32_VALUE) { /* Found valid map */
1708 phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1709 phy_page = tmp & 0xFFFF;
1710 #if CMD_DMA
1711 /* TODO */
1712 #else
1713 ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
1714 #endif
1717 return ret;
1720 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1721 * Function: FTL_Cache_Write_Page
1722 * Inputs: Pointer to buffer, page address, cache block number
1723 * Outputs: PASS=0 / FAIL=1
1724 * Description: It writes the data in Cache Block
1725 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1726 static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
1727 u8 cache_blk, u16 flag)
1729 u8 *pDest;
1730 u64 addr;
1732 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1733 __FILE__, __LINE__, __func__);
1735 addr = Cache.array[cache_blk].address;
1736 pDest = Cache.array[cache_blk].buf;
1738 pDest += (unsigned long)(page_addr - addr);
1739 Cache.array[cache_blk].changed = SET;
1740 #if CMD_DMA
1741 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1742 int_cache[ftl_cmd_cnt].item = cache_blk;
1743 int_cache[ftl_cmd_cnt].cache.address =
1744 Cache.array[cache_blk].address;
1745 int_cache[ftl_cmd_cnt].cache.changed =
1746 Cache.array[cache_blk].changed;
1747 #endif
1748 GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
1749 ftl_cmd_cnt++;
1750 #else
1751 memcpy(pDest, pData, DeviceInfo.wPageDataSize);
1752 #endif
1753 if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
1754 Cache.array[cache_blk].use_cnt++;
1757 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1758 * Function: FTL_Cache_Write
1759 * Inputs: none
1760 * Outputs: PASS=0 / FAIL=1
1761 * Description: It writes least frequently used Cache block to flash if it
1762 * has been changed
1763 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1764 static int FTL_Cache_Write(void)
1766 int i, bResult = PASS;
1767 u16 bNO, least_count = 0xFFFF;
1769 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1770 __FILE__, __LINE__, __func__);
1772 FTL_Calculate_LRU();
1774 bNO = Cache.LRU;
1775 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
1776 "Least used cache block is %d\n", bNO);
1778 if (Cache.array[bNO].changed != SET)
1779 return bResult;
1781 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
1782 " Block %d containing logical block %d is dirty\n",
1783 bNO,
1784 (u32)(Cache.array[bNO].address >>
1785 DeviceInfo.nBitsInBlockDataSize));
1786 #if CMD_DMA
1787 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1788 int_cache[ftl_cmd_cnt].item = bNO;
1789 int_cache[ftl_cmd_cnt].cache.address =
1790 Cache.array[bNO].address;
1791 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
1792 #endif
1793 #endif
1794 bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
1795 Cache.array[bNO].address);
1796 if (bResult != ERR)
1797 Cache.array[bNO].changed = CLEAR;
1799 least_count = Cache.array[bNO].use_cnt;
1801 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1802 if (i == bNO)
1803 continue;
1804 if (Cache.array[i].use_cnt > 0)
1805 Cache.array[i].use_cnt -= least_count;
1808 return bResult;
1811 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1812 * Function: FTL_Cache_Read
1813 * Inputs: Page address
1814 * Outputs: PASS=0 / FAIL=1
1815 * Description: It reads the block from device in Cache Block
1816 * Set the LRU count to 1
1817 * Mark the Cache Block as clean
1818 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1819 static int FTL_Cache_Read(u64 logical_addr)
1821 u64 item_addr, phy_addr;
1822 u16 num;
1823 int ret;
1825 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1826 __FILE__, __LINE__, __func__);
1828 num = Cache.LRU; /* The LRU cache item will be overwritten */
1830 item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
1831 Cache.cache_item_size;
1832 Cache.array[num].address = item_addr;
1833 Cache.array[num].use_cnt = 1;
1834 Cache.array[num].changed = CLEAR;
1836 #if CMD_DMA
1837 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1838 int_cache[ftl_cmd_cnt].item = num;
1839 int_cache[ftl_cmd_cnt].cache.address =
1840 Cache.array[num].address;
1841 int_cache[ftl_cmd_cnt].cache.changed =
1842 Cache.array[num].changed;
1843 #endif
1844 #endif
1846 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
1847 * Otherwise, read it from NAND
1849 ret = search_l2_cache(Cache.array[num].buf, logical_addr);
1850 if (PASS == ret) /* Hit in L2 Cache */
1851 return ret;
1853 /* Compute the physical start address of NAND device according to */
1854 /* the logical start address of the cache item (LRU cache item) */
1855 phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
1856 GLOB_u64_Remainder(item_addr, 2);
1858 return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
1861 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1862 * Function: FTL_Check_Block_Table
1863 * Inputs: ?
1864 * Outputs: PASS=0 / FAIL=1
1865 * Description: It checks the correctness of each block table entry
1866 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1867 static int FTL_Check_Block_Table(int wOldTable)
1869 u32 i;
1870 int wResult = PASS;
1871 u32 blk_idx;
1872 u32 *pbt = (u32 *)g_pBlockTable;
1873 u8 *pFlag = flag_check_blk_table;
1875 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1876 __FILE__, __LINE__, __func__);
1878 if (NULL != pFlag) {
1879 memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
1880 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1881 blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
1884 * 20081006/KBV - Changed to pFlag[i] reference
1885 * to avoid buffer overflow
1889 * 2008-10-20 Yunpeng Note: This change avoid
1890 * buffer overflow, but changed function of
1891 * the code, so it should be re-write later
1893 if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
1894 PASS == pFlag[i]) {
1895 wResult = FAIL;
1896 break;
1897 } else {
1898 pFlag[i] = PASS;
1903 return wResult;
1907 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1908 * Function: FTL_Write_Block_Table
1909 * Inputs: flasg
1910 * Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
1911 * happen. -1 Error
1912 * Description: It writes the block table
1913 * Block table always mapped to LBA 0 which inturn mapped
1914 * to any physical block
1915 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1916 static int FTL_Write_Block_Table(int wForce)
1918 u32 *pbt = (u32 *)g_pBlockTable;
1919 int wSuccess = PASS;
1920 u32 wTempBlockTableIndex;
1921 u16 bt_pages, new_bt_offset;
1922 u8 blockchangeoccured = 0;
1924 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1925 __FILE__, __LINE__, __func__);
1927 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
1929 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
1930 return 0;
1932 if (PASS == wForce) {
1933 g_wBlockTableOffset =
1934 (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
1935 #if CMD_DMA
1936 p_BTableChangesDelta =
1937 (struct BTableChangesDelta *)g_pBTDelta_Free;
1938 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
1940 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
1941 p_BTableChangesDelta->g_wBlockTableOffset =
1942 g_wBlockTableOffset;
1943 p_BTableChangesDelta->ValidFields = 0x01;
1944 #endif
1947 nand_dbg_print(NAND_DBG_DEBUG,
1948 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
1949 g_wBlockTableIndex, g_wBlockTableOffset);
1951 do {
1952 new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
1953 if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
1954 (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
1955 (FAIL == wSuccess)) {
1956 wTempBlockTableIndex = FTL_Replace_Block_Table();
1957 if (BAD_BLOCK == wTempBlockTableIndex)
1958 return ERR;
1959 if (!blockchangeoccured) {
1960 bt_block_changed = 1;
1961 blockchangeoccured = 1;
1964 g_wBlockTableIndex = wTempBlockTableIndex;
1965 g_wBlockTableOffset = 0;
1966 pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
1967 #if CMD_DMA
1968 p_BTableChangesDelta =
1969 (struct BTableChangesDelta *)g_pBTDelta_Free;
1970 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
1972 p_BTableChangesDelta->ftl_cmd_cnt =
1973 ftl_cmd_cnt;
1974 p_BTableChangesDelta->g_wBlockTableOffset =
1975 g_wBlockTableOffset;
1976 p_BTableChangesDelta->g_wBlockTableIndex =
1977 g_wBlockTableIndex;
1978 p_BTableChangesDelta->ValidFields = 0x03;
1980 p_BTableChangesDelta =
1981 (struct BTableChangesDelta *)g_pBTDelta_Free;
1982 g_pBTDelta_Free +=
1983 sizeof(struct BTableChangesDelta);
1985 p_BTableChangesDelta->ftl_cmd_cnt =
1986 ftl_cmd_cnt;
1987 p_BTableChangesDelta->BT_Index =
1988 BLOCK_TABLE_INDEX;
1989 p_BTableChangesDelta->BT_Entry_Value =
1990 pbt[BLOCK_TABLE_INDEX];
1991 p_BTableChangesDelta->ValidFields = 0x0C;
1992 #endif
1995 wSuccess = FTL_Write_Block_Table_Data();
1996 if (FAIL == wSuccess)
1997 MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
1998 } while (FAIL == wSuccess);
2000 g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2002 return 1;
2005 static int force_format_nand(void)
2007 u32 i;
2009 /* Force erase the whole unprotected physical partiton of NAND */
2010 printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2011 printk(KERN_ALERT "From phyical block %d to %d\n",
2012 DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2013 for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2014 if (GLOB_LLD_Erase_Block(i))
2015 printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2017 printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2018 while(1);
2020 return PASS;
/* Format the flash by force-erasing every spectra block.
 * (The FTL_Format_Flash(1) path is intentionally disabled.) */
int GLOB_FTL_Flash_Format(void)
{
    //return FTL_Format_Flash(1);
    return force_format_nand();
}
2030 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2031 * Function: FTL_Search_Block_Table_IN_Block
2032 * Inputs: Block Number
2033 * Pointer to page
2034 * Outputs: PASS / FAIL
2035 * Page containing the block table
2036 * Description: It searches the block table in the block
2037 * passed as an argument.
2039 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Search_Block_Table_IN_Block - locate the most recent block-table
 * copy inside physical block BT_Block whose spare-area tag matches BT_Tag.
 * On success returns PASS and stores the table's first page in *Page;
 * also updates the globals bt_flag and g_cBlockTableStatus
 * (CURRENT_BLOCK_TABLE when found after the last IPF marker page,
 * IN_PROGRESS_BLOCK_TABLE when found in pages prior to it).
 * Returns FAIL when no consistent table copy is found.
 */
2040 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2041 u8 BT_Tag, u16 *Page)
2043 u16 i, j, k;
2044 u16 Result = PASS;
2045 u16 Last_IPF = 0;
2046 u8 BT_Found = 0;
2047 u8 *tagarray;
2048 u8 *tempbuf = tmp_buf_search_bt_in_block;
2049 u8 *pSpareBuf = spare_buf_search_bt_in_block;
2050 u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2051 u8 bt_flag_last_page = 0xFF;
2052 u8 search_in_previous_pages = 0;
2053 u16 bt_pages;
2055 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2056 __FILE__, __LINE__, __func__);
2058 nand_dbg_print(NAND_DBG_DEBUG,
2059 "Searching block table in %u block\n",
2060 (unsigned int)BT_Block);
2062 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
/*
 * Phase 1: walk the block in strides of (bt_pages + 1) to find the last
 * IPF ("in progress flag") marker page, comparing page content against
 * the global IPF pattern g_pIPF.
 */
2064 for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2065 i += (bt_pages + 1)) {
2066 nand_dbg_print(NAND_DBG_DEBUG,
2067 "Searching last IPF: %d\n", i);
2068 Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2069 BT_Block, i, 1);
2071 if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2072 if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2073 continue;
2074 } else {
2075 search_in_previous_pages = 1;
2076 Last_IPF = i;
2080 if (!search_in_previous_pages) {
2081 if (i != bt_pages) {
2082 i -= (bt_pages + 1);
2083 Last_IPF = i;
2087 if (0 == Last_IPF)
2088 break;
/*
 * Phase 2a: no wrap needed - check the table copy written in the pages
 * immediately AFTER the last IPF.  A valid copy must carry the same tag
 * in the spare area of both its first and last page.
 */
2090 if (!search_in_previous_pages) {
2091 i = i + 1;
2092 nand_dbg_print(NAND_DBG_DEBUG,
2093 "Reading the spare area of Block %u Page %u",
2094 (unsigned int)BT_Block, i);
2095 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2096 BT_Block, i, 1);
2097 nand_dbg_print(NAND_DBG_DEBUG,
2098 "Reading the spare area of Block %u Page %u",
2099 (unsigned int)BT_Block, i + bt_pages - 1);
2100 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2101 BT_Block, i + bt_pages - 1, 1);
2103 k = 0;
2104 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2105 if (j) {
2106 for (; k < j; k++) {
2107 if (tagarray[k] == BT_Tag)
2108 break;
2112 if (k < j)
2113 bt_flag = tagarray[k];
2114 else
2115 Result = FAIL;
2117 if (Result == PASS) {
2118 k = 0;
2119 j = FTL_Extract_Block_Table_Tag(
2120 pSpareBufBTLastPage, &tagarray);
2121 if (j) {
2122 for (; k < j; k++) {
2123 if (tagarray[k] == BT_Tag)
2124 break;
2128 if (k < j)
2129 bt_flag_last_page = tagarray[k];
2130 else
2131 Result = FAIL;
2133 if (Result == PASS) {
2134 if (bt_flag == bt_flag_last_page) {
2135 nand_dbg_print(NAND_DBG_DEBUG,
2136 "Block table is found"
2137 " in page after IPF "
2138 "at block %d "
2139 "page %d\n",
2140 (int)BT_Block, i);
2141 BT_Found = 1;
2142 *Page = i;
2143 g_cBlockTableStatus =
2144 CURRENT_BLOCK_TABLE;
2145 break;
2146 } else {
2147 Result = FAIL;
/*
 * Phase 2b: walk backwards through earlier table copies (pages PRIOR to
 * the IPF) until a self-consistent copy is found; such a copy marks an
 * in-progress (not yet committed) table.
 */
2153 if (search_in_previous_pages)
2154 i = i - bt_pages;
2155 else
2156 i = i - (bt_pages + 1);
2158 Result = PASS;
2160 nand_dbg_print(NAND_DBG_DEBUG,
2161 "Reading the spare area of Block %d Page %d",
2162 (int)BT_Block, i);
2164 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2165 nand_dbg_print(NAND_DBG_DEBUG,
2166 "Reading the spare area of Block %u Page %u",
2167 (unsigned int)BT_Block, i + bt_pages - 1);
2169 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2170 BT_Block, i + bt_pages - 1, 1);
2172 k = 0;
2173 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2174 if (j) {
2175 for (; k < j; k++) {
2176 if (tagarray[k] == BT_Tag)
2177 break;
2181 if (k < j)
2182 bt_flag = tagarray[k];
2183 else
2184 Result = FAIL;
2186 if (Result == PASS) {
2187 k = 0;
2188 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2189 &tagarray);
2190 if (j) {
2191 for (; k < j; k++) {
2192 if (tagarray[k] == BT_Tag)
2193 break;
2197 if (k < j) {
2198 bt_flag_last_page = tagarray[k];
2199 } else {
2200 Result = FAIL;
2201 break;
2204 if (Result == PASS) {
2205 if (bt_flag == bt_flag_last_page) {
2206 nand_dbg_print(NAND_DBG_DEBUG,
2207 "Block table is found "
2208 "in page prior to IPF "
2209 "at block %u page %d\n",
2210 (unsigned int)BT_Block, i);
2211 BT_Found = 1;
2212 *Page = i;
2213 g_cBlockTableStatus =
2214 IN_PROGRESS_BLOCK_TABLE;
2215 break;
2216 } else {
2217 Result = FAIL;
2218 break;
/* Fall back to an earlier copy if the backwards walk failed part-way. */
2224 if (Result == FAIL) {
2225 if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2226 BT_Found = 1;
2227 *Page = i - (bt_pages + 1);
2229 if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2230 goto func_return;
/*
 * Phase 3: no IPF marker at all - the only candidate copy starts at
 * page 0; validate it by the same first/last spare-tag match.
 */
2233 if (Last_IPF == 0) {
2234 i = 0;
2235 Result = PASS;
2236 nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2237 "Block %u Page %u", (unsigned int)BT_Block, i);
2239 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2240 nand_dbg_print(NAND_DBG_DEBUG,
2241 "Reading the spare area of Block %u Page %u",
2242 (unsigned int)BT_Block, i + bt_pages - 1);
2243 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2244 BT_Block, i + bt_pages - 1, 1);
2246 k = 0;
2247 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2248 if (j) {
2249 for (; k < j; k++) {
2250 if (tagarray[k] == BT_Tag)
2251 break;
2255 if (k < j)
2256 bt_flag = tagarray[k];
2257 else
2258 Result = FAIL;
2260 if (Result == PASS) {
2261 k = 0;
2262 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2263 &tagarray);
2264 if (j) {
2265 for (; k < j; k++) {
2266 if (tagarray[k] == BT_Tag)
2267 break;
2271 if (k < j)
2272 bt_flag_last_page = tagarray[k];
2273 else
2274 Result = FAIL;
2276 if (Result == PASS) {
2277 if (bt_flag == bt_flag_last_page) {
2278 nand_dbg_print(NAND_DBG_DEBUG,
2279 "Block table is found "
2280 "in page after IPF at "
2281 "block %u page %u\n",
2282 (unsigned int)BT_Block,
2283 (unsigned int)i);
2284 BT_Found = 1;
2285 *Page = i;
2286 g_cBlockTableStatus =
2287 CURRENT_BLOCK_TABLE;
2288 goto func_return;
2289 } else {
2290 Result = FAIL;
2295 if (Result == FAIL)
2296 goto func_return;
2298 func_return:
2299 return Result;
/* Return the in-RAM base address of the block table (g_pBlockTable). */
2302 u8 *get_blk_table_start_addr(void)
2304 return g_pBlockTable;
/* Return the block table size in bytes: one u32 entry per data block. */
2307 unsigned long get_blk_table_len(void)
2309 return DeviceInfo.wDataBlockNum * sizeof(u32);
/* Return the in-RAM base address of the wear counters (g_pWearCounter). */
2312 u8 *get_wear_leveling_table_start_addr(void)
2314 return g_pWearCounter;
/* Return the wear-leveling table size in bytes: one u8 counter per data block. */
2317 unsigned long get_wear_leveling_table_len(void)
2319 return DeviceInfo.wDataBlockNum * sizeof(u8);
2322 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2323 * Function: FTL_Read_Block_Table
2324 * Inputs: none
2325 * Outputs: PASS / FAIL
2326 * Description: read the flash spare area and find a block containing the
2327 * most recent block table(having largest block_table_counter).
2328 * Find the last written Block table in this block.
2329 * Check the correctness of Block Table
2330 * If CDMA is enabled, this function is called in
2331 * polling mode.
2332 * We don't need to store changes in Block table in this
2333 * function as it is called only at initialization
2335 * Note: Currently this function is called at initialization
2336 * before any read/erase/write command issued to flash so,
2337 * there is no need to wait for CDMA list to complete as of now
2338 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Read_Block_Table - scan the Spectra partition for block-table
 * blocks, pick the most recent copy, load it into RAM and validate it.
 * Returns PASS on success, FAIL if no valid table could be loaded
 * (optionally auto-formatting when AUTO_FORMAT_FLASH is set).
 * Runs only at initialization, in polling mode; block-table change
 * deltas are not recorded here.
 */
2339 static int FTL_Read_Block_Table(void)
2341 u16 i = 0;
2342 int k, j;
2343 u8 *tempBuf, *tagarray;
2344 int wResult = FAIL;
2345 int status = FAIL;
2346 u8 block_table_found = 0;
2347 int search_result;
2348 u32 Block;
2349 u16 Page = 0;
2350 u16 PageCount;
2351 u16 bt_pages;
2352 int wBytesCopied = 0, tempvar;
2354 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2355 __FILE__, __LINE__, __func__);
2357 tempBuf = tmp_buf1_read_blk_table;
2358 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
/*
 * Pass 1: scan every block's page-0 spare area for a block-table tag
 * and record each tagged block into g_pBTBlocks[], indexed by its
 * tag counter (FIRST_BT_ID..LAST_BT_ID).
 */
2360 for (j = DeviceInfo.wSpectraStartBlock;
2361 j <= (int)DeviceInfo.wSpectraEndBlock;
2362 j++) {
2363 status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2364 k = 0;
2365 i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2366 if (i) {
2367 status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2368 j, 0, 1);
2369 for (; k < i; k++) {
2370 if (tagarray[k] == tempBuf[3])
2371 break;
2375 if (k < i)
2376 k = tagarray[k];
2377 else
2378 continue;
2380 nand_dbg_print(NAND_DBG_DEBUG,
2381 "Block table is contained in Block %d %d\n",
2382 (unsigned int)j, (unsigned int)k);
2384 if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2385 g_pBTBlocks[k-FIRST_BT_ID] = j;
2386 block_table_found = 1;
2387 } else {
2388 printk(KERN_ERR "FTL_Read_Block_Table -"
2389 "This should never happens. "
2390 "Two block table have same counter %u!\n", k);
/*
 * Pass 2: the tag counter wraps in a ring; work out the newest entry
 * (j) and the oldest-erased entry (last_erased) from which tags are
 * present, handling the wrapped and non-wrapped cases separately.
 */
2394 if (block_table_found) {
2395 if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2396 g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2397 j = LAST_BT_ID;
2398 while ((j > FIRST_BT_ID) &&
2399 (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2400 j--;
2401 if (j == FIRST_BT_ID) {
2402 j = LAST_BT_ID;
2403 last_erased = LAST_BT_ID;
2404 } else {
2405 last_erased = (u8)j + 1;
2406 while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2407 g_pBTBlocks[j - FIRST_BT_ID]))
2408 j--;
2410 } else {
2411 j = FIRST_BT_ID;
2412 while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2413 j++;
2414 last_erased = (u8)j;
2415 while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2416 g_pBTBlocks[j - FIRST_BT_ID]))
2417 j++;
2418 if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2419 j--;
2422 if (last_erased > j)
2423 j += (1 + LAST_BT_ID - FIRST_BT_ID);
/*
 * Pass 3: try candidate blocks newest-first; copy each table found
 * page-by-page into RAM and run FTL_Check_Block_Table until one
 * validates, falling back to older copies inside the same block.
 */
2425 for (; (j >= last_erased) && (FAIL == wResult); j--) {
2426 i = (j - FIRST_BT_ID) %
2427 (1 + LAST_BT_ID - FIRST_BT_ID);
2428 search_result =
2429 FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2430 i + FIRST_BT_ID, &Page);
2431 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2432 block_table_found = 0;
2434 while ((search_result == PASS) && (FAIL == wResult)) {
2435 nand_dbg_print(NAND_DBG_DEBUG,
2436 "FTL_Read_Block_Table:"
2437 "Block: %u Page: %u "
2438 "contains block table\n",
2439 (unsigned int)g_pBTBlocks[i],
2440 (unsigned int)Page);
2442 tempBuf = tmp_buf2_read_blk_table;
2444 for (k = 0; k < bt_pages; k++) {
2445 Block = g_pBTBlocks[i];
2446 PageCount = 1;
2448 status =
2449 GLOB_LLD_Read_Page_Main_Polling(
2450 tempBuf, Block, Page, PageCount);
/* First page carries a 4-byte header (tag); skip it when copying. */
2452 tempvar = k ? 0 : 4;
2454 wBytesCopied +=
2455 FTL_Copy_Block_Table_From_Flash(
2456 tempBuf + tempvar,
2457 DeviceInfo.wPageDataSize - tempvar,
2458 wBytesCopied);
2460 Page++;
2463 wResult = FTL_Check_Block_Table(FAIL);
2464 if (FAIL == wResult) {
2465 block_table_found = 0;
2466 if (Page > bt_pages)
2467 Page -= ((bt_pages<<1) + 1);
2468 else
2469 search_result = FAIL;
2475 if (PASS == wResult) {
2476 if (!block_table_found)
2477 FTL_Execute_SPL_Recovery();
2479 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2480 g_wBlockTableOffset = (u16)Page + 1;
2481 else
2482 g_wBlockTableOffset = (u16)Page - bt_pages;
2484 g_wBlockTableIndex = (u32)g_pBTBlocks[i];
2486 #if CMD_DMA
/* MLC devices additionally keep a u16 read-counter array after the wear counters. */
2487 if (DeviceInfo.MLCDevice)
2488 memcpy(g_pBTStartingCopy, g_pBlockTable,
2489 DeviceInfo.wDataBlockNum * sizeof(u32)
2490 + DeviceInfo.wDataBlockNum * sizeof(u8)
2491 + DeviceInfo.wDataBlockNum * sizeof(u16));
2492 else
2493 memcpy(g_pBTStartingCopy, g_pBlockTable,
2494 DeviceInfo.wDataBlockNum * sizeof(u32)
2495 + DeviceInfo.wDataBlockNum * sizeof(u8));
2496 #endif
2499 if (FAIL == wResult)
2500 printk(KERN_ERR "Yunpeng - "
2501 "Can not find valid spectra block table!\n");
2503 #if AUTO_FORMAT_FLASH
2504 if (FAIL == wResult) {
2505 nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
2506 wResult = FTL_Format_Flash(0);
2508 #endif
2510 return wResult;
2513 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2514 * Function: FTL_Get_Page_Num
2515 * Inputs: Size in bytes
2516 * Outputs: Size in pages
2517 * Description: It calculates the pages required for the length passed
2518 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Get_Page_Num - convert a byte length to a page count, rounding up
 * when the length is not an exact multiple of the page data size.
 */
2519 static u32 FTL_Get_Page_Num(u64 length)
2521 return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
2522 (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
2525 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2526 * Function: FTL_Get_Physical_Block_Addr
2527 * Inputs: Block Address (byte format)
2528 * Outputs: Physical address of the block.
2529 * Description: It translates LBA to PBA by returning address stored
2530 * at the LBA location in the block table
2531 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Get_Physical_Block_Addr - translate a logical byte address to the
 * physical byte address of its block, by looking up the block table and
 * masking off the status bits (~BAD_BLOCK) from the entry.
 */
2532 static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
2534 u32 *pbt;
2535 u64 physical_addr;
2537 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2538 __FILE__, __LINE__, __func__);
2540 pbt = (u32 *)g_pBlockTable;
2541 physical_addr = (u64) DeviceInfo.wBlockDataSize *
2542 (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
2544 return physical_addr;
2547 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2548 * Function: FTL_Get_Block_Index
2549 * Inputs: Physical Block no.
2550 * Outputs: Logical block no. /BAD_BLOCK
2551 * Description: It returns the logical block no. for the PBA passed
2552 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Get_Block_Index - reverse lookup: linear-scan the block table for
 * the logical index whose entry (status bits masked) equals wBlockNum.
 * Returns BAD_BLOCK if the physical block is not mapped.  O(n) in the
 * number of data blocks.
 */
2553 static u32 FTL_Get_Block_Index(u32 wBlockNum)
2555 u32 *pbt = (u32 *)g_pBlockTable;
2556 u32 i;
2558 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2559 __FILE__, __LINE__, __func__);
2561 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
2562 if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
2563 return i;
2565 return BAD_BLOCK;
2568 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2569 * Function: GLOB_FTL_Wear_Leveling
2570 * Inputs: none
2571 * Outputs: PASS=0
2572 * Description: This is static wear leveling (done by explicit call)
2573 * do complete static wear leveling
2574 * do complete garbage collection
2575 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * GLOB_FTL_Wear_Leveling - explicit-call entry: run full static wear
 * leveling, then a full garbage collection pass.  Always returns PASS;
 * the result of FTL_Static_Wear_Leveling() is deliberately ignored here.
 */
2576 int GLOB_FTL_Wear_Leveling(void)
2578 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2579 __FILE__, __LINE__, __func__);
2581 FTL_Static_Wear_Leveling();
2582 GLOB_FTL_Garbage_Collection();
2584 return PASS;
/*
 * find_least_most_worn - scan the block table (skipping bad blocks and
 * blocks already flagged in chg[]) and report, via the out-parameters:
 * the least-worn in-use data block (*least_idx/*least_cnt) and the
 * most-worn spare block (*most_idx/*most_cnt).  Wear counts come from
 * g_pWearCounter, indexed by physical block minus wSpectraStartBlock.
 * Callers pre-seed *least_cnt = 0xFF and *most_cnt = 0.
 */
2587 static void find_least_most_worn(u8 *chg,
2588 u32 *least_idx, u8 *least_cnt,
2589 u32 *most_idx, u8 *most_cnt)
2591 u32 *pbt = (u32 *)g_pBlockTable;
2592 u32 idx;
2593 u8 cnt;
2594 int i;
2596 for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
2597 if (IS_BAD_BLOCK(i) || PASS == chg[i])
2598 continue;
2600 idx = (u32) ((~BAD_BLOCK) & pbt[i]);
2601 cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
2603 if (IS_SPARE_BLOCK(i)) {
2604 if (cnt > *most_cnt) {
2605 *most_cnt = cnt;
2606 *most_idx = idx;
2610 if (IS_DATA_BLOCK(i)) {
2611 if (cnt < *least_cnt) {
2612 *least_cnt = cnt;
2613 *least_idx = idx;
/* Skip candidates already processed in a prior wear-leveling round. */
2617 if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
2618 debug_boundary_error(*most_idx,
2619 DeviceInfo.wDataBlockNum, 0);
2620 debug_boundary_error(*least_idx,
2621 DeviceInfo.wDataBlockNum, 0);
2622 continue;
/*
 * move_blks_for_wear_leveling - migrate the data of the least-worn
 * block (*least_idx) onto a replacement spare block obtained from
 * FTL_Replace_MWBlock(), then swap the two block-table entries.
 * Copy is retried up to RETRY_TIMES; on persistent failure the
 * replacement block is marked bad and *result is set to FAIL.
 * Returns PASS to let the caller continue leveling, FAIL to stop
 * (no spare available, copy failed, or WEAR_LEVELING_BLOCK_NUM
 * replacements exceeded).  Under CMD_DMA every table mutation is
 * also journaled into the BTableChangesDelta list.
 */
2627 static int move_blks_for_wear_leveling(u8 *chg,
2628 u32 *least_idx, u32 *rep_blk_num, int *result)
2630 u32 *pbt = (u32 *)g_pBlockTable;
2631 u32 rep_blk;
2632 int j, ret_cp_blk, ret_erase;
2633 int ret = PASS;
2635 chg[*least_idx] = PASS;
2636 debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
2638 rep_blk = FTL_Replace_MWBlock();
2639 if (rep_blk != BAD_BLOCK) {
2640 nand_dbg_print(NAND_DBG_DEBUG,
2641 "More than two spare blocks exist so do it\n");
2642 nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
2643 rep_blk);
2645 chg[rep_blk] = PASS;
2647 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2648 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2649 FTL_Write_IN_Progress_Block_Table_Page();
/* Retry the block copy; erase the target between failed attempts. */
2652 for (j = 0; j < RETRY_TIMES; j++) {
2653 ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
2654 DeviceInfo.wBlockDataSize,
2655 (u64)rep_blk * DeviceInfo.wBlockDataSize);
2656 if (FAIL == ret_cp_blk) {
2657 ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
2658 * DeviceInfo.wBlockDataSize);
2659 if (FAIL == ret_erase)
2660 MARK_BLOCK_AS_BAD(pbt[rep_blk]);
2661 } else {
2662 nand_dbg_print(NAND_DBG_DEBUG,
2663 "FTL_Copy_Block == OK\n");
2664 break;
/* Copy succeeded: swap table entries (old block becomes DISCARD). */
2668 if (j < RETRY_TIMES) {
2669 u32 tmp;
2670 u32 old_idx = FTL_Get_Block_Index(*least_idx);
2671 u32 rep_idx = FTL_Get_Block_Index(rep_blk);
2672 tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
2673 pbt[old_idx] = (u32)((~SPARE_BLOCK) &
2674 pbt[rep_idx]);
2675 pbt[rep_idx] = tmp;
2676 #if CMD_DMA
2677 p_BTableChangesDelta = (struct BTableChangesDelta *)
2678 g_pBTDelta_Free;
2679 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2680 p_BTableChangesDelta->ftl_cmd_cnt =
2681 ftl_cmd_cnt;
2682 p_BTableChangesDelta->BT_Index = old_idx;
2683 p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
2684 p_BTableChangesDelta->ValidFields = 0x0C;
2686 p_BTableChangesDelta = (struct BTableChangesDelta *)
2687 g_pBTDelta_Free;
2688 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2690 p_BTableChangesDelta->ftl_cmd_cnt =
2691 ftl_cmd_cnt;
2692 p_BTableChangesDelta->BT_Index = rep_idx;
2693 p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
2694 p_BTableChangesDelta->ValidFields = 0x0C;
2695 #endif
2696 } else {
/* All retries failed: condemn the replacement block and report failure. */
2697 pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
2698 #if CMD_DMA
2699 p_BTableChangesDelta = (struct BTableChangesDelta *)
2700 g_pBTDelta_Free;
2701 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2703 p_BTableChangesDelta->ftl_cmd_cnt =
2704 ftl_cmd_cnt;
2705 p_BTableChangesDelta->BT_Index =
2706 FTL_Get_Block_Index(rep_blk);
2707 p_BTableChangesDelta->BT_Entry_Value =
2708 pbt[FTL_Get_Block_Index(rep_blk)];
2709 p_BTableChangesDelta->ValidFields = 0x0C;
2710 #endif
2711 *result = FAIL;
2712 ret = FAIL;
2715 if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
2716 ret = FAIL;
2717 } else {
2718 printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
2719 ret = FAIL;
2722 return ret;
2725 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2726 * Function: FTL_Static_Wear_Leveling
2727 * Inputs: none
2728 * Outputs: PASS=0 / FAIL=1
2729 * Description: This is static wear leveling (done by explicit call)
2730 * search for most&least used
2731 * if difference < GATE:
2732 * update the block table with exchange
2733 * mark block table in flash as IN_PROGRESS
2734 * copy flash block
2735 * the caller should handle GC clean up after calling this function
2736 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Static_Wear_Leveling - repeatedly find the most-worn spare block
 * and least-worn data block and migrate data between them while their
 * wear-count gap exceeds WEAR_LEVELING_GATE.  Loops until the gap
 * closes, a move fails, or the per-call replacement budget is spent.
 * Returns PASS/FAIL (FAIL also when the scratch flag buffer is absent).
 * The caller is expected to run garbage collection afterwards.
 */
2737 int FTL_Static_Wear_Leveling(void)
2739 u8 most_worn_cnt;
2740 u8 least_worn_cnt;
2741 u32 most_worn_idx;
2742 u32 least_worn_idx;
2743 int result = PASS;
2744 int go_on = PASS;
2745 u32 replaced_blks = 0;
2746 u8 *chang_flag = flags_static_wear_leveling;
2748 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2749 __FILE__, __LINE__, __func__);
2751 if (!chang_flag)
2752 return FAIL;
2754 memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
2755 while (go_on == PASS) {
2756 nand_dbg_print(NAND_DBG_DEBUG,
2757 "starting static wear leveling\n");
/* Reset search extremes for this round (0xFF = max possible wear count). */
2758 most_worn_cnt = 0;
2759 least_worn_cnt = 0xFF;
2760 least_worn_idx = BLOCK_TABLE_INDEX;
2761 most_worn_idx = BLOCK_TABLE_INDEX;
2763 find_least_most_worn(chang_flag, &least_worn_idx,
2764 &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
2766 nand_dbg_print(NAND_DBG_DEBUG,
2767 "Used and least worn is block %u, whos count is %u\n",
2768 (unsigned int)least_worn_idx,
2769 (unsigned int)least_worn_cnt);
2771 nand_dbg_print(NAND_DBG_DEBUG,
2772 "Free and most worn is block %u, whos count is %u\n",
2773 (unsigned int)most_worn_idx,
2774 (unsigned int)most_worn_cnt);
2776 if ((most_worn_cnt > least_worn_cnt) &&
2777 (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
2778 go_on = move_blks_for_wear_leveling(chang_flag,
2779 &least_worn_idx, &replaced_blks, &result);
2780 else
2781 go_on = FAIL;
2784 return result;
2787 #if CMD_DMA
/*
 * do_garbage_collection (CMD_DMA variant) - erase up to discard_cnt
 * discarded blocks and flip their table entries back to SPARE_BLOCK,
 * journaling each change into the BTableChangesDelta list.  Blocks
 * that currently host a block-table copy are skipped (BT GC handles
 * those).  Stops early when the CDMA command budget would overflow
 * (ftl_cmd_cnt + 28 >= 256).  Returns PASS if at least one block was
 * erased, FAIL otherwise.
 */
2788 static int do_garbage_collection(u32 discard_cnt)
2790 u32 *pbt = (u32 *)g_pBlockTable;
2791 u32 pba;
2792 u8 bt_block_erased = 0;
2793 int i, cnt, ret = FAIL;
2794 u64 addr;
2796 i = 0;
2797 while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
2798 ((ftl_cmd_cnt + 28) < 256)) {
2799 if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
2800 (pbt[i] & DISCARD_BLOCK)) {
2801 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2802 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2803 FTL_Write_IN_Progress_Block_Table_Page();
2806 addr = FTL_Get_Physical_Block_Addr((u64)i *
2807 DeviceInfo.wBlockDataSize);
2808 pba = BLK_FROM_ADDR(addr);
/* Leave block-table blocks to BT garbage collection. */
2810 for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
2811 if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
2812 nand_dbg_print(NAND_DBG_DEBUG,
2813 "GC will erase BT block %u\n",
2814 (unsigned int)pba);
2815 discard_cnt--;
2816 i++;
2817 bt_block_erased = 1;
2818 break;
2822 if (bt_block_erased) {
2823 bt_block_erased = 0;
2824 continue;
2827 addr = FTL_Get_Physical_Block_Addr((u64)i *
2828 DeviceInfo.wBlockDataSize);
2830 if (PASS == GLOB_FTL_Block_Erase(addr)) {
2831 pbt[i] &= (u32)(~DISCARD_BLOCK);
2832 pbt[i] |= (u32)(SPARE_BLOCK);
2833 p_BTableChangesDelta =
2834 (struct BTableChangesDelta *)
2835 g_pBTDelta_Free;
2836 g_pBTDelta_Free +=
2837 sizeof(struct BTableChangesDelta);
/* Erase already bumped ftl_cmd_cnt; tie this delta to that command. */
2838 p_BTableChangesDelta->ftl_cmd_cnt =
2839 ftl_cmd_cnt - 1;
2840 p_BTableChangesDelta->BT_Index = i;
2841 p_BTableChangesDelta->BT_Entry_Value = pbt[i];
2842 p_BTableChangesDelta->ValidFields = 0x0C;
2843 discard_cnt--;
2844 ret = PASS;
2845 } else {
2846 MARK_BLOCK_AS_BAD(pbt[i]);
2850 i++;
2853 return ret;
2856 #else
/*
 * do_garbage_collection (non-DMA variant) - erase up to discard_cnt
 * discarded blocks and flip their table entries back to SPARE_BLOCK.
 * Blocks hosting a block-table copy are skipped (BT GC handles them),
 * and blocks currently used by the L2 cache are skipped too.  Returns
 * PASS if at least one block was erased, FAIL otherwise.
 */
2857 static int do_garbage_collection(u32 discard_cnt)
2859 u32 *pbt = (u32 *)g_pBlockTable;
2860 u32 pba;
2861 u8 bt_block_erased = 0;
2862 int i, cnt, ret = FAIL;
2863 u64 addr;
2865 i = 0;
2866 while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
2867 if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
2868 (pbt[i] & DISCARD_BLOCK)) {
2869 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2870 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2871 FTL_Write_IN_Progress_Block_Table_Page();
2874 addr = FTL_Get_Physical_Block_Addr((u64)i *
2875 DeviceInfo.wBlockDataSize);
2876 pba = BLK_FROM_ADDR(addr);
/* Leave block-table blocks to BT garbage collection. */
2878 for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
2879 if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
2880 nand_dbg_print(NAND_DBG_DEBUG,
2881 "GC will erase BT block %d\n",
2882 pba);
2883 discard_cnt--;
2884 i++;
2885 bt_block_erased = 1;
2886 break;
2890 if (bt_block_erased) {
2891 bt_block_erased = 0;
2892 continue;
2895 /* If the discard block is L2 cache block, then just skip it */
2896 for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
2897 if (cache_l2.blk_array[cnt] == pba) {
2898 nand_dbg_print(NAND_DBG_DEBUG,
2899 "GC will erase L2 cache blk %d\n",
2900 pba);
2901 break;
2904 if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
2905 discard_cnt--;
2906 i++;
2907 continue;
2910 addr = FTL_Get_Physical_Block_Addr((u64)i *
2911 DeviceInfo.wBlockDataSize);
2913 if (PASS == GLOB_FTL_Block_Erase(addr)) {
2914 pbt[i] &= (u32)(~DISCARD_BLOCK);
2915 pbt[i] |= (u32)(SPARE_BLOCK);
2916 discard_cnt--;
2917 ret = PASS;
2918 } else {
2919 MARK_BLOCK_AS_BAD(pbt[i]);
2923 i++;
2926 return ret;
2928 #endif
2930 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2931 * Function: GLOB_FTL_Garbage_Collection
2932 * Inputs: none
2933 * Outputs: PASS / FAIL (returns the number of un-erased blocks)
2934 * Description: search the block table for all discarded blocks to erase
2935 * for each discarded block:
2936 * set the flash block to IN_PROGRESS
2937 * erase the block
2938 * update the block table
2939 * write the block table to flash
2940 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * GLOB_FTL_Garbage_Collection - top-level GC entry point.  Guards
 * against re-entry via the GC_Called flag, first reclaims block-table
 * blocks, counts discarded data blocks, and if any exist writes the
 * block table, runs do_garbage_collection(), and writes the table
 * again.  Returns the result of do_garbage_collection(), FAIL when
 * nothing was discarded, or PASS immediately on re-entry.
 */
2941 int GLOB_FTL_Garbage_Collection(void)
2943 u32 i;
2944 u32 wDiscard = 0;
2945 int wResult = FAIL;
2946 u32 *pbt = (u32 *)g_pBlockTable;
2948 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2949 __FILE__, __LINE__, __func__);
2951 if (GC_Called) {
2952 printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
2953 "has been re-entered! Exit.\n")
2954 return PASS;
2957 GC_Called = 1;
2959 GLOB_FTL_BT_Garbage_Collection();
2961 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2962 if (IS_DISCARDED_BLOCK(i))
2963 wDiscard++;
/* wDiscard is unsigned, so "<= 0" is effectively "== 0". */
2966 if (wDiscard <= 0) {
2967 GC_Called = 0;
2968 return wResult;
2971 nand_dbg_print(NAND_DBG_DEBUG,
2972 "Found %d discarded blocks\n", wDiscard);
2974 FTL_Write_Block_Table(FAIL);
2976 wResult = do_garbage_collection(wDiscard);
2978 FTL_Write_Block_Table(FAIL);
2980 GC_Called = 0;
2982 return wResult;
2986 #if CMD_DMA
/*
 * do_bt_garbage_collection (CMD_DMA variant) - starting from
 * last_erased, erase block-table blocks whose entries are flagged
 * DISCARD_BLOCK, flip them back to SPARE_BLOCK, invalidate their
 * g_pBTBlocks ring slot, advance last_erased (wrapping at
 * LAST_BT_ID), and journal each table mutation in the delta list.
 * Guarded against re-entry by BT_GC_Called, and bounded by the CDMA
 * command budget (ftl_cmd_cnt + 28 < 256).  Returns PASS if at least
 * one block was reclaimed (or immediately on re-entry), else FAIL.
 */
2987 static int do_bt_garbage_collection(void)
2989 u32 pba, lba;
2990 u32 *pbt = (u32 *)g_pBlockTable;
2991 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
2992 u64 addr;
2993 int i, ret = FAIL;
2995 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2996 __FILE__, __LINE__, __func__);
2998 if (BT_GC_Called)
2999 return PASS;
3001 BT_GC_Called = 1;
3003 for (i = last_erased; (i <= LAST_BT_ID) &&
3004 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3005 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
3006 ((ftl_cmd_cnt + 28)) < 256; i++) {
3007 pba = pBTBlocksNode[i - FIRST_BT_ID];
3008 lba = FTL_Get_Block_Index(pba);
3009 nand_dbg_print(NAND_DBG_DEBUG,
3010 "do_bt_garbage_collection: pba %d, lba %d\n",
3011 pba, lba);
3012 nand_dbg_print(NAND_DBG_DEBUG,
3013 "Block Table Entry: %d", pbt[lba]);
3015 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3016 (pbt[lba] & DISCARD_BLOCK)) {
3017 nand_dbg_print(NAND_DBG_DEBUG,
3018 "do_bt_garbage_collection_cdma: "
3019 "Erasing Block tables present in block %d\n",
3020 pba);
3021 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3022 DeviceInfo.wBlockDataSize);
3023 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3024 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3025 pbt[lba] |= (u32)(SPARE_BLOCK);
3027 p_BTableChangesDelta =
3028 (struct BTableChangesDelta *)
3029 g_pBTDelta_Free;
3030 g_pBTDelta_Free +=
3031 sizeof(struct BTableChangesDelta);
/* Erase already bumped ftl_cmd_cnt; tie this delta to that command. */
3033 p_BTableChangesDelta->ftl_cmd_cnt =
3034 ftl_cmd_cnt - 1;
3035 p_BTableChangesDelta->BT_Index = lba;
3036 p_BTableChangesDelta->BT_Entry_Value =
3037 pbt[lba];
3039 p_BTableChangesDelta->ValidFields = 0x0C;
3041 ret = PASS;
3042 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3043 BTBLOCK_INVAL;
3044 nand_dbg_print(NAND_DBG_DEBUG,
3045 "resetting bt entry at index %d "
3046 "value %d\n", i,
3047 pBTBlocksNode[i - FIRST_BT_ID]);
3048 if (last_erased == LAST_BT_ID)
3049 last_erased = FIRST_BT_ID;
3050 else
3051 last_erased++;
3052 } else {
3053 MARK_BLOCK_AS_BAD(pbt[lba]);
3058 BT_GC_Called = 0;
3060 return ret;
3063 #else
/*
 * do_bt_garbage_collection (non-DMA variant) - same reclamation as the
 * CMD_DMA version but without delta journaling or the CDMA command
 * budget bound: erase discarded block-table blocks starting from
 * last_erased, flip their entries to SPARE_BLOCK, invalidate the
 * g_pBTBlocks slot and advance last_erased with wrap-around.
 * Guarded against re-entry by BT_GC_Called.  Returns PASS if at least
 * one block was reclaimed (or immediately on re-entry), else FAIL.
 * NOTE(review): the debug strings in the two variants appear swapped
 * (this one prints "..._cdma" in its first message) - cosmetic only.
 */
3064 static int do_bt_garbage_collection(void)
3066 u32 pba, lba;
3067 u32 *pbt = (u32 *)g_pBlockTable;
3068 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3069 u64 addr;
3070 int i, ret = FAIL;
3072 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3073 __FILE__, __LINE__, __func__);
3075 if (BT_GC_Called)
3076 return PASS;
3078 BT_GC_Called = 1;
3080 for (i = last_erased; (i <= LAST_BT_ID) &&
3081 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3082 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3083 pba = pBTBlocksNode[i - FIRST_BT_ID];
3084 lba = FTL_Get_Block_Index(pba);
3085 nand_dbg_print(NAND_DBG_DEBUG,
3086 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3087 pba, lba);
3088 nand_dbg_print(NAND_DBG_DEBUG,
3089 "Block Table Entry: %d", pbt[lba]);
3091 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3092 (pbt[lba] & DISCARD_BLOCK)) {
3093 nand_dbg_print(NAND_DBG_DEBUG,
3094 "do_bt_garbage_collection: "
3095 "Erasing Block tables present in block %d\n",
3096 pba);
3097 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3098 DeviceInfo.wBlockDataSize);
3099 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3100 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3101 pbt[lba] |= (u32)(SPARE_BLOCK);
3102 ret = PASS;
3103 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3104 BTBLOCK_INVAL;
3105 nand_dbg_print(NAND_DBG_DEBUG,
3106 "resetting bt entry at index %d "
3107 "value %d\n", i,
3108 pBTBlocksNode[i - FIRST_BT_ID]);
3109 if (last_erased == LAST_BT_ID)
3110 last_erased = FIRST_BT_ID;
3111 else
3112 last_erased++;
3113 } else {
3114 MARK_BLOCK_AS_BAD(pbt[lba]);
3119 BT_GC_Called = 0;
3121 return ret;
3124 #endif
3126 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3127 * Function: GLOB_FTL_BT_Garbage_Collection
3128 * Inputs: none
3129 * Outputs: PASS / FAIL (returns the number of un-erased blocks)
3130 * Description: Erases discarded blocks containing Block table
3132 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Public wrapper around the build-variant-specific do_bt_garbage_collection(). */
3133 int GLOB_FTL_BT_Garbage_Collection(void)
3135 return do_bt_garbage_collection();
3138 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3139 * Function: FTL_Replace_OneBlock
3140 * Inputs: Block number 1
3141 * Block number 2
3142 * Outputs: Replaced Block Number
3143 * Description: Interchange block table entries at wBlockNum and wReplaceNum
3145 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Replace_OneBlock - swap the block-table entries at logical
 * indices blk and rep_blk: blk receives rep_blk's physical block
 * (spare flag cleared) and rep_blk inherits blk's old entry (marked
 * DISCARD unless blk was bad, in which case the bad entry moves over
 * unchanged).  Returns the new entry stored at blk, or BAD_BLOCK when
 * rep_blk == BAD_BLOCK (no swap performed).  Under CMD_DMA both
 * mutations are journaled into the BTableChangesDelta list.
 */
3146 static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
3148 u32 tmp_blk;
3149 u32 replace_node = BAD_BLOCK;
3150 u32 *pbt = (u32 *)g_pBlockTable;
3152 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3153 __FILE__, __LINE__, __func__);
3155 if (rep_blk != BAD_BLOCK) {
3156 if (IS_BAD_BLOCK(blk))
3157 tmp_blk = pbt[blk];
3158 else
3159 tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
3161 replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
3162 pbt[blk] = replace_node;
3163 pbt[rep_blk] = tmp_blk;
3165 #if CMD_DMA
3166 p_BTableChangesDelta =
3167 (struct BTableChangesDelta *)g_pBTDelta_Free;
3168 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3170 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3171 p_BTableChangesDelta->BT_Index = blk;
3172 p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
3174 p_BTableChangesDelta->ValidFields = 0x0C;
3176 p_BTableChangesDelta =
3177 (struct BTableChangesDelta *)g_pBTDelta_Free;
3178 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3180 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3181 p_BTableChangesDelta->BT_Index = rep_blk;
3182 p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
3183 p_BTableChangesDelta->ValidFields = 0x0C;
3184 #endif
3187 return replace_node;
3190 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3191 * Function: FTL_Write_Block_Table_Data
3192 * Inputs: Block table size in pages
3193 * Outputs: PASS=0 / FAIL=1
3194 * Description: Write block table data in flash
3195 * If first page and last page
3196 * Write data+BT flag
3197 * else
3198 * Write data
3199 * BT flag is a counter. Its value is incremented for block table
3200 * write in a new Block
3201 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3202 static int FTL_Write_Block_Table_Data(void)
3204 u64 dwBlockTableAddr, pTempAddr;
3205 u32 Block;
3206 u16 Page, PageCount;
3207 u8 *tempBuf = tmp_buf_write_blk_table_data;
3208 int wBytesCopied;
3209 u16 bt_pages;
3211 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3212 __FILE__, __LINE__, __func__);
3214 dwBlockTableAddr =
3215 (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
3216 (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
3217 pTempAddr = dwBlockTableAddr;
3219 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
3221 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
3222 "page= %d BlockTableIndex= %d "
3223 "BlockTableOffset=%d\n", bt_pages,
3224 g_wBlockTableIndex, g_wBlockTableOffset);
3226 Block = BLK_FROM_ADDR(pTempAddr);
3227 Page = PAGE_FROM_ADDR(pTempAddr, Block);
3228 PageCount = 1;
3230 if (bt_block_changed) {
3231 if (bt_flag == LAST_BT_ID) {
3232 bt_flag = FIRST_BT_ID;
3233 g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3234 } else if (bt_flag < LAST_BT_ID) {
3235 bt_flag++;
3236 g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3239 if ((bt_flag > (LAST_BT_ID-4)) &&
3240 g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
3241 BTBLOCK_INVAL) {
3242 bt_block_changed = 0;
3243 GLOB_FTL_BT_Garbage_Collection();
3246 bt_block_changed = 0;
3247 nand_dbg_print(NAND_DBG_DEBUG,
3248 "Block Table Counter is %u Block %u\n",
3249 bt_flag, (unsigned int)Block);
3252 memset(tempBuf, 0, 3);
3253 tempBuf[3] = bt_flag;
3254 wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
3255 DeviceInfo.wPageDataSize - 4, 0);
3256 memset(&tempBuf[wBytesCopied + 4], 0xff,
3257 DeviceInfo.wPageSize - (wBytesCopied + 4));
3258 FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
3259 bt_flag);
3261 #if CMD_DMA
3262 memcpy(g_pNextBlockTable, tempBuf,
3263 DeviceInfo.wPageSize * sizeof(u8));
3264 nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
3265 "Block %u Page %u\n", (unsigned int)Block, Page);
3266 if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
3267 Block, Page, 1,
3268 LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3269 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
3270 "%s, Line %d, Function: %s, "
3271 "new Bad Block %d generated!\n",
3272 __FILE__, __LINE__, __func__, Block);
3273 goto func_return;
3276 ftl_cmd_cnt++;
3277 g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
3278 #else
3279 if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
3280 nand_dbg_print(NAND_DBG_WARN,
3281 "NAND Program fail in %s, Line %d, Function: %s, "
3282 "new Bad Block %d generated!\n",
3283 __FILE__, __LINE__, __func__, Block);
3284 goto func_return;
3286 #endif
3288 if (bt_pages > 1) {
3289 PageCount = bt_pages - 1;
3290 if (PageCount > 1) {
3291 wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
3292 DeviceInfo.wPageDataSize * (PageCount - 1),
3293 wBytesCopied);
3295 #if CMD_DMA
3296 memcpy(g_pNextBlockTable, tempBuf,
3297 (PageCount - 1) * DeviceInfo.wPageDataSize);
3298 if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
3299 g_pNextBlockTable, Block, Page + 1,
3300 PageCount - 1)) {
3301 nand_dbg_print(NAND_DBG_WARN,
3302 "NAND Program fail in %s, Line %d, "
3303 "Function: %s, "
3304 "new Bad Block %d generated!\n",
3305 __FILE__, __LINE__, __func__,
3306 (int)Block);
3307 goto func_return;
3310 ftl_cmd_cnt++;
3311 g_pNextBlockTable += (PageCount - 1) *
3312 DeviceInfo.wPageDataSize * sizeof(u8);
3313 #else
3314 if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
3315 Block, Page + 1, PageCount - 1)) {
3316 nand_dbg_print(NAND_DBG_WARN,
3317 "NAND Program fail in %s, Line %d, "
3318 "Function: %s, "
3319 "new Bad Block %d generated!\n",
3320 __FILE__, __LINE__, __func__,
3321 (int)Block);
3322 goto func_return;
3324 #endif
3327 wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
3328 DeviceInfo.wPageDataSize, wBytesCopied);
3329 memset(&tempBuf[wBytesCopied], 0xff,
3330 DeviceInfo.wPageSize-wBytesCopied);
3331 FTL_Insert_Block_Table_Signature(
3332 &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
3333 #if CMD_DMA
3334 memcpy(g_pNextBlockTable, tempBuf,
3335 DeviceInfo.wPageSize * sizeof(u8));
3336 nand_dbg_print(NAND_DBG_DEBUG,
3337 "Writing the last Page of Block Table "
3338 "Block %u Page %u\n",
3339 (unsigned int)Block, Page + bt_pages - 1);
3340 if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
3341 g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
3342 LLD_CMD_FLAG_MODE_CDMA |
3343 LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3344 nand_dbg_print(NAND_DBG_WARN,
3345 "NAND Program fail in %s, Line %d, "
3346 "Function: %s, new Bad Block %d generated!\n",
3347 __FILE__, __LINE__, __func__, Block);
3348 goto func_return;
3350 ftl_cmd_cnt++;
3351 #else
3352 if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
3353 Block, Page+bt_pages - 1, 1)) {
3354 nand_dbg_print(NAND_DBG_WARN,
3355 "NAND Program fail in %s, Line %d, "
3356 "Function: %s, "
3357 "new Bad Block %d generated!\n",
3358 __FILE__, __LINE__, __func__, Block);
3359 goto func_return;
3361 #endif
3364 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
3366 func_return:
3367 return PASS;
3370 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3371 * Function: FTL_Replace_Block_Table
3372 * Inputs: None
3373 * Outputs: PASS=0 / FAIL=1
3374 * Description: Get a new block to write block table
3375 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3376 static u32 FTL_Replace_Block_Table(void)
3378 u32 blk;
3379 int gc;
3381 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3382 __FILE__, __LINE__, __func__);
3384 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3386 if ((BAD_BLOCK == blk) && (PASS == gc)) {
3387 GLOB_FTL_Garbage_Collection();
3388 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3390 if (BAD_BLOCK == blk)
3391 printk(KERN_ERR "%s, %s: There is no spare block. "
3392 "It should never happen\n",
3393 __FILE__, __func__);
3395 nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
3397 return blk;
3400 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3401 * Function: FTL_Replace_LWBlock
3402 * Inputs: Block number
3403 * Pointer to Garbage Collect flag
3404 * Outputs:
3405 * Description: Determine the least weared block by traversing
3406 * block table
3407 * Set Garbage collection to be called if number of spare
3408 * block is less than Free Block Gate count
3409 * Change Block table entry to map least worn block for current
3410 * operation
3411 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3412 static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
3414 u32 i;
3415 u32 *pbt = (u32 *)g_pBlockTable;
3416 u8 wLeastWornCounter = 0xFF;
3417 u32 wLeastWornIndex = BAD_BLOCK;
3418 u32 wSpareBlockNum = 0;
3419 u32 wDiscardBlockNum = 0;
3421 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3422 __FILE__, __LINE__, __func__);
3424 if (IS_SPARE_BLOCK(wBlockNum)) {
3425 *pGarbageCollect = FAIL;
3426 pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
3427 #if CMD_DMA
3428 p_BTableChangesDelta =
3429 (struct BTableChangesDelta *)g_pBTDelta_Free;
3430 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3431 p_BTableChangesDelta->ftl_cmd_cnt =
3432 ftl_cmd_cnt;
3433 p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
3434 p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
3435 p_BTableChangesDelta->ValidFields = 0x0C;
3436 #endif
3437 return pbt[wBlockNum];
3440 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3441 if (IS_DISCARDED_BLOCK(i))
3442 wDiscardBlockNum++;
3444 if (IS_SPARE_BLOCK(i)) {
3445 u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
3446 if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
3447 printk(KERN_ERR "FTL_Replace_LWBlock: "
3448 "This should never occur!\n");
3449 if (g_pWearCounter[wPhysicalIndex -
3450 DeviceInfo.wSpectraStartBlock] <
3451 wLeastWornCounter) {
3452 wLeastWornCounter =
3453 g_pWearCounter[wPhysicalIndex -
3454 DeviceInfo.wSpectraStartBlock];
3455 wLeastWornIndex = i;
3457 wSpareBlockNum++;
3461 nand_dbg_print(NAND_DBG_WARN,
3462 "FTL_Replace_LWBlock: Least Worn Counter %d\n",
3463 (int)wLeastWornCounter);
3465 if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
3466 (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
3467 *pGarbageCollect = PASS;
3468 else
3469 *pGarbageCollect = FAIL;
3471 nand_dbg_print(NAND_DBG_DEBUG,
3472 "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
3473 " Blocks %u\n",
3474 (unsigned int)wDiscardBlockNum,
3475 (unsigned int)wSpareBlockNum);
3477 return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
3480 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3481 * Function: FTL_Replace_MWBlock
3482 * Inputs: None
3483 * Outputs: most worn spare block no./BAD_BLOCK
3484 * Description: It finds most worn spare block.
3485 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3486 static u32 FTL_Replace_MWBlock(void)
3488 u32 i;
3489 u32 *pbt = (u32 *)g_pBlockTable;
3490 u8 wMostWornCounter = 0;
3491 u32 wMostWornIndex = BAD_BLOCK;
3492 u32 wSpareBlockNum = 0;
3494 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3495 __FILE__, __LINE__, __func__);
3497 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3498 if (IS_SPARE_BLOCK(i)) {
3499 u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
3500 if (g_pWearCounter[wPhysicalIndex -
3501 DeviceInfo.wSpectraStartBlock] >
3502 wMostWornCounter) {
3503 wMostWornCounter =
3504 g_pWearCounter[wPhysicalIndex -
3505 DeviceInfo.wSpectraStartBlock];
3506 wMostWornIndex = wPhysicalIndex;
3508 wSpareBlockNum++;
3512 if (wSpareBlockNum <= 2)
3513 return BAD_BLOCK;
3515 return wMostWornIndex;
3518 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3519 * Function: FTL_Replace_Block
3520 * Inputs: Block Address
3521 * Outputs: PASS=0 / FAIL=1
3522 * Description: If block specified by blk_addr parameter is not free,
3523 * replace it with the least worn block.
3524 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3525 static int FTL_Replace_Block(u64 blk_addr)
3527 u32 current_blk = BLK_FROM_ADDR(blk_addr);
3528 u32 *pbt = (u32 *)g_pBlockTable;
3529 int wResult = PASS;
3530 int GarbageCollect = FAIL;
3532 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3533 __FILE__, __LINE__, __func__);
3535 if (IS_SPARE_BLOCK(current_blk)) {
3536 pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
3537 #if CMD_DMA
3538 p_BTableChangesDelta =
3539 (struct BTableChangesDelta *)g_pBTDelta_Free;
3540 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3541 p_BTableChangesDelta->ftl_cmd_cnt =
3542 ftl_cmd_cnt;
3543 p_BTableChangesDelta->BT_Index = current_blk;
3544 p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
3545 p_BTableChangesDelta->ValidFields = 0x0C ;
3546 #endif
3547 return wResult;
3550 FTL_Replace_LWBlock(current_blk, &GarbageCollect);
3552 if (PASS == GarbageCollect)
3553 wResult = GLOB_FTL_Garbage_Collection();
3555 return wResult;
3558 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3559 * Function: GLOB_FTL_Is_BadBlock
3560 * Inputs: block number to test
3561 * Outputs: PASS (block is BAD) / FAIL (block is not bad)
3562 * Description: test if this block number is flagged as bad
3563 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3564 int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
3566 u32 *pbt = (u32 *)g_pBlockTable;
3568 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3569 __FILE__, __LINE__, __func__);
3571 if (wBlockNum >= DeviceInfo.wSpectraStartBlock
3572 && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
3573 return PASS;
3574 else
3575 return FAIL;
3578 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3579 * Function: GLOB_FTL_Flush_Cache
3580 * Inputs: none
3581 * Outputs: PASS=0 / FAIL=1
3582 * Description: flush all the cache blocks to flash
3583 * if a cache block is not dirty, don't do anything with it
3584 * else, write the block and update the block table
3585 * Note: This function should be called at shutdown/power down.
3586 * to write important data into device
3587 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3588 int GLOB_FTL_Flush_Cache(void)
3590 int i, ret;
3592 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3593 __FILE__, __LINE__, __func__);
3595 for (i = 0; i < CACHE_ITEM_NUM; i++) {
3596 if (SET == Cache.array[i].changed) {
3597 #if CMD_DMA
3598 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
3599 int_cache[ftl_cmd_cnt].item = i;
3600 int_cache[ftl_cmd_cnt].cache.address =
3601 Cache.array[i].address;
3602 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
3603 #endif
3604 #endif
3605 ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
3606 if (PASS == ret) {
3607 Cache.array[i].changed = CLEAR;
3608 } else {
3609 printk(KERN_ALERT "Failed when write back to L2 cache!\n");
3610 /* TODO - How to handle this? */
3615 flush_l2_cache();
3617 return FTL_Write_Block_Table(FAIL);
3620 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3621 * Function: GLOB_FTL_Page_Read
3622 * Inputs: pointer to data
3623 * logical address of data (u64 is LBA * Bytes/Page)
3624 * Outputs: PASS=0 / FAIL=1
3625 * Description: reads a page of data into RAM from the cache
3626 * if the data is not already in cache, read from flash to cache
3627 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3628 int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
3630 u16 cache_item;
3631 int res = PASS;
3633 nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
3634 "page_addr: %llu\n", logical_addr);
3636 cache_item = FTL_Cache_If_Hit(logical_addr);
3638 if (UNHIT_CACHE_ITEM == cache_item) {
3639 nand_dbg_print(NAND_DBG_DEBUG,
3640 "GLOB_FTL_Page_Read: Cache not hit\n");
3641 res = FTL_Cache_Write();
3642 if (ERR == FTL_Cache_Read(logical_addr))
3643 res = ERR;
3644 cache_item = Cache.LRU;
3647 FTL_Cache_Read_Page(data, logical_addr, cache_item);
3649 return res;
3652 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3653 * Function: GLOB_FTL_Page_Write
3654 * Inputs: pointer to data
3655 * address of data (ADDRESSTYPE is LBA * Bytes/Page)
3656 * Outputs: PASS=0 / FAIL=1
3657 * Description: writes a page of data from RAM to the cache
3658 * if the data is not already in cache, write back the
3659 * least recently used block and read the addressed block
3660 * from flash to cache
3661 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3662 int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
3664 u16 cache_blk;
3665 u32 *pbt = (u32 *)g_pBlockTable;
3666 int wResult = PASS;
3668 nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
3669 "dwPageAddr: %llu\n", dwPageAddr);
3671 cache_blk = FTL_Cache_If_Hit(dwPageAddr);
3673 if (UNHIT_CACHE_ITEM == cache_blk) {
3674 wResult = FTL_Cache_Write();
3675 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
3676 wResult = FTL_Replace_Block(dwPageAddr);
3677 pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
3678 if (wResult == FAIL)
3679 return FAIL;
3681 if (ERR == FTL_Cache_Read(dwPageAddr))
3682 wResult = ERR;
3683 cache_blk = Cache.LRU;
3684 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3685 } else {
3686 #if CMD_DMA
3687 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
3688 LLD_CMD_FLAG_ORDER_BEFORE_REST);
3689 #else
3690 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3691 #endif
3694 return wResult;
3697 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3698 * Function: GLOB_FTL_Block_Erase
3699 * Inputs: address of block to erase (now in byte format, should change to
3700 * block format)
3701 * Outputs: PASS=0 / FAIL=1
3702 * Description: erases the specified block
3703 * increments the erase count
3704 * If erase count reaches its upper limit,call function to
3705 * do the ajustment as per the relative erase count values
3706 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3707 int GLOB_FTL_Block_Erase(u64 blk_addr)
3709 int status;
3710 u32 BlkIdx;
3712 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3713 __FILE__, __LINE__, __func__);
3715 BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
3717 if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
3718 printk(KERN_ERR "GLOB_FTL_Block_Erase: "
3719 "This should never occur\n");
3720 return FAIL;
3723 #if CMD_DMA
3724 status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
3725 if (status == FAIL)
3726 nand_dbg_print(NAND_DBG_WARN,
3727 "NAND Program fail in %s, Line %d, "
3728 "Function: %s, new Bad Block %d generated!\n",
3729 __FILE__, __LINE__, __func__, BlkIdx);
3730 #else
3731 status = GLOB_LLD_Erase_Block(BlkIdx);
3732 if (status == FAIL) {
3733 nand_dbg_print(NAND_DBG_WARN,
3734 "NAND Program fail in %s, Line %d, "
3735 "Function: %s, new Bad Block %d generated!\n",
3736 __FILE__, __LINE__, __func__, BlkIdx);
3737 return status;
3739 #endif
3741 if (DeviceInfo.MLCDevice) {
3742 g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
3743 if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
3744 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3745 FTL_Write_IN_Progress_Block_Table_Page();
3749 g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
3751 #if CMD_DMA
3752 p_BTableChangesDelta =
3753 (struct BTableChangesDelta *)g_pBTDelta_Free;
3754 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3755 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3756 p_BTableChangesDelta->WC_Index =
3757 BlkIdx - DeviceInfo.wSpectraStartBlock;
3758 p_BTableChangesDelta->WC_Entry_Value =
3759 g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
3760 p_BTableChangesDelta->ValidFields = 0x30;
3762 if (DeviceInfo.MLCDevice) {
3763 p_BTableChangesDelta =
3764 (struct BTableChangesDelta *)g_pBTDelta_Free;
3765 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3766 p_BTableChangesDelta->ftl_cmd_cnt =
3767 ftl_cmd_cnt;
3768 p_BTableChangesDelta->RC_Index =
3769 BlkIdx - DeviceInfo.wSpectraStartBlock;
3770 p_BTableChangesDelta->RC_Entry_Value =
3771 g_pReadCounter[BlkIdx -
3772 DeviceInfo.wSpectraStartBlock];
3773 p_BTableChangesDelta->ValidFields = 0xC0;
3776 ftl_cmd_cnt++;
3777 #endif
3779 if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
3780 FTL_Adjust_Relative_Erase_Count(BlkIdx);
3782 return status;
3786 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3787 * Function: FTL_Adjust_Relative_Erase_Count
3788 * Inputs: index to block that was just incremented and is at the max
3789 * Outputs: PASS=0 / FAIL=1
3790 * Description: If any erase counts at MAX, adjusts erase count of every
3791 * block by substracting least worn
3792 * counter from counter value of every entry in wear table
3793 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3794 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
3796 u8 wLeastWornCounter = MAX_BYTE_VALUE;
3797 u8 wWearCounter;
3798 u32 i, wWearIndex;
3799 u32 *pbt = (u32 *)g_pBlockTable;
3800 int wResult = PASS;
3802 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3803 __FILE__, __LINE__, __func__);
3805 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3806 if (IS_BAD_BLOCK(i))
3807 continue;
3808 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
3810 if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
3811 printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
3812 "This should never occur\n");
3813 wWearCounter = g_pWearCounter[wWearIndex -
3814 DeviceInfo.wSpectraStartBlock];
3815 if (wWearCounter < wLeastWornCounter)
3816 wLeastWornCounter = wWearCounter;
3819 if (wLeastWornCounter == 0) {
3820 nand_dbg_print(NAND_DBG_WARN,
3821 "Adjusting Wear Levelling Counters: Special Case\n");
3822 g_pWearCounter[Index_of_MAX -
3823 DeviceInfo.wSpectraStartBlock]--;
3824 #if CMD_DMA
3825 p_BTableChangesDelta =
3826 (struct BTableChangesDelta *)g_pBTDelta_Free;
3827 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3828 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3829 p_BTableChangesDelta->WC_Index =
3830 Index_of_MAX - DeviceInfo.wSpectraStartBlock;
3831 p_BTableChangesDelta->WC_Entry_Value =
3832 g_pWearCounter[Index_of_MAX -
3833 DeviceInfo.wSpectraStartBlock];
3834 p_BTableChangesDelta->ValidFields = 0x30;
3835 #endif
3836 FTL_Static_Wear_Leveling();
3837 } else {
3838 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
3839 if (!IS_BAD_BLOCK(i)) {
3840 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
3841 g_pWearCounter[wWearIndex -
3842 DeviceInfo.wSpectraStartBlock] =
3843 (u8)(g_pWearCounter
3844 [wWearIndex -
3845 DeviceInfo.wSpectraStartBlock] -
3846 wLeastWornCounter);
3847 #if CMD_DMA
3848 p_BTableChangesDelta =
3849 (struct BTableChangesDelta *)g_pBTDelta_Free;
3850 g_pBTDelta_Free +=
3851 sizeof(struct BTableChangesDelta);
3853 p_BTableChangesDelta->ftl_cmd_cnt =
3854 ftl_cmd_cnt;
3855 p_BTableChangesDelta->WC_Index = wWearIndex -
3856 DeviceInfo.wSpectraStartBlock;
3857 p_BTableChangesDelta->WC_Entry_Value =
3858 g_pWearCounter[wWearIndex -
3859 DeviceInfo.wSpectraStartBlock];
3860 p_BTableChangesDelta->ValidFields = 0x30;
3861 #endif
3865 return wResult;
3868 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3869 * Function: FTL_Write_IN_Progress_Block_Table_Page
3870 * Inputs: None
3871 * Outputs: None
3872 * Description: It writes in-progress flag page to the page next to
3873 * block table
3874 ***********************************************************************/
3875 static int FTL_Write_IN_Progress_Block_Table_Page(void)
3877 int wResult = PASS;
3878 u16 bt_pages;
3879 u16 dwIPFPageAddr;
3880 #if CMD_DMA
3881 #else
3882 u32 *pbt = (u32 *)g_pBlockTable;
3883 u32 wTempBlockTableIndex;
3884 #endif
3886 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3887 __FILE__, __LINE__, __func__);
3889 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
3891 dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
3893 nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
3894 "Block %d Page %d\n",
3895 g_wBlockTableIndex, dwIPFPageAddr);
3897 #if CMD_DMA
3898 wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
3899 g_wBlockTableIndex, dwIPFPageAddr, 1,
3900 LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
3901 if (wResult == FAIL) {
3902 nand_dbg_print(NAND_DBG_WARN,
3903 "NAND Program fail in %s, Line %d, "
3904 "Function: %s, new Bad Block %d generated!\n",
3905 __FILE__, __LINE__, __func__,
3906 g_wBlockTableIndex);
3908 g_wBlockTableOffset = dwIPFPageAddr + 1;
3909 p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
3910 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3911 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3912 p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
3913 p_BTableChangesDelta->ValidFields = 0x01;
3914 ftl_cmd_cnt++;
3915 #else
3916 wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
3917 g_wBlockTableIndex, dwIPFPageAddr, 1);
3918 if (wResult == FAIL) {
3919 nand_dbg_print(NAND_DBG_WARN,
3920 "NAND Program fail in %s, Line %d, "
3921 "Function: %s, new Bad Block %d generated!\n",
3922 __FILE__, __LINE__, __func__,
3923 (int)g_wBlockTableIndex);
3924 MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
3925 wTempBlockTableIndex = FTL_Replace_Block_Table();
3926 bt_block_changed = 1;
3927 if (BAD_BLOCK == wTempBlockTableIndex)
3928 return ERR;
3929 g_wBlockTableIndex = wTempBlockTableIndex;
3930 g_wBlockTableOffset = 0;
3931 /* Block table tag is '00'. Means it's used one */
3932 pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
3933 return FAIL;
3935 g_wBlockTableOffset = dwIPFPageAddr + 1;
3936 #endif
3937 return wResult;
3940 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3941 * Function: FTL_Read_Disturbance
3942 * Inputs: block address
3943 * Outputs: PASS=0 / FAIL=1
3944 * Description: used to handle read disturbance. Data in block that
3945 * reaches its read limit is moved to new block
3946 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3947 int FTL_Read_Disturbance(u32 blk_addr)
3949 int wResult = FAIL;
3950 u32 *pbt = (u32 *) g_pBlockTable;
3951 u32 dwOldBlockAddr = blk_addr;
3952 u32 wBlockNum;
3953 u32 i;
3954 u32 wLeastReadCounter = 0xFFFF;
3955 u32 wLeastReadIndex = BAD_BLOCK;
3956 u32 wSpareBlockNum = 0;
3957 u32 wTempNode;
3958 u32 wReplacedNode;
3959 u8 *g_pTempBuf;
3961 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
3962 __FILE__, __LINE__, __func__);
3964 #if CMD_DMA
3965 g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
3966 cp_back_buf_idx++;
3967 if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
3968 printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
3969 "Maybe too many pending commands in your CDMA chain.\n");
3970 return FAIL;
3972 #else
3973 g_pTempBuf = tmp_buf_read_disturbance;
3974 #endif
3976 wBlockNum = FTL_Get_Block_Index(blk_addr);
3978 do {
3979 /* This is a bug.Here 'i' should be logical block number
3980 * and start from 1 (0 is reserved for block table).
3981 * Have fixed it. - Yunpeng 2008. 12. 19
3983 for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
3984 if (IS_SPARE_BLOCK(i)) {
3985 u32 wPhysicalIndex =
3986 (u32)((~SPARE_BLOCK) & pbt[i]);
3987 if (g_pReadCounter[wPhysicalIndex -
3988 DeviceInfo.wSpectraStartBlock] <
3989 wLeastReadCounter) {
3990 wLeastReadCounter =
3991 g_pReadCounter[wPhysicalIndex -
3992 DeviceInfo.wSpectraStartBlock];
3993 wLeastReadIndex = i;
3995 wSpareBlockNum++;
3999 if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
4000 wResult = GLOB_FTL_Garbage_Collection();
4001 if (PASS == wResult)
4002 continue;
4003 else
4004 break;
4005 } else {
4006 wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
4007 wReplacedNode = (u32)((~SPARE_BLOCK) &
4008 pbt[wLeastReadIndex]);
4009 #if CMD_DMA
4010 pbt[wBlockNum] = wReplacedNode;
4011 pbt[wLeastReadIndex] = wTempNode;
4012 p_BTableChangesDelta =
4013 (struct BTableChangesDelta *)g_pBTDelta_Free;
4014 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4016 p_BTableChangesDelta->ftl_cmd_cnt =
4017 ftl_cmd_cnt;
4018 p_BTableChangesDelta->BT_Index = wBlockNum;
4019 p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
4020 p_BTableChangesDelta->ValidFields = 0x0C;
4022 p_BTableChangesDelta =
4023 (struct BTableChangesDelta *)g_pBTDelta_Free;
4024 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4026 p_BTableChangesDelta->ftl_cmd_cnt =
4027 ftl_cmd_cnt;
4028 p_BTableChangesDelta->BT_Index = wLeastReadIndex;
4029 p_BTableChangesDelta->BT_Entry_Value =
4030 pbt[wLeastReadIndex];
4031 p_BTableChangesDelta->ValidFields = 0x0C;
4033 wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
4034 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
4035 LLD_CMD_FLAG_MODE_CDMA);
4036 if (wResult == FAIL)
4037 return wResult;
4039 ftl_cmd_cnt++;
4041 if (wResult != FAIL) {
4042 if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
4043 g_pTempBuf, pbt[wBlockNum], 0,
4044 DeviceInfo.wPagesPerBlock)) {
4045 nand_dbg_print(NAND_DBG_WARN,
4046 "NAND Program fail in "
4047 "%s, Line %d, Function: %s, "
4048 "new Bad Block %d "
4049 "generated!\n",
4050 __FILE__, __LINE__, __func__,
4051 (int)pbt[wBlockNum]);
4052 wResult = FAIL;
4053 MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
4055 ftl_cmd_cnt++;
4057 #else
4058 wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
4059 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
4060 if (wResult == FAIL)
4061 return wResult;
4063 if (wResult != FAIL) {
4064 /* This is a bug. At this time, pbt[wBlockNum]
4065 is still the physical address of
4066 discard block, and should not be write.
4067 Have fixed it as below.
4068 -- Yunpeng 2008.12.19
4070 wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
4071 wReplacedNode, 0,
4072 DeviceInfo.wPagesPerBlock);
4073 if (wResult == FAIL) {
4074 nand_dbg_print(NAND_DBG_WARN,
4075 "NAND Program fail in "
4076 "%s, Line %d, Function: %s, "
4077 "new Bad Block %d "
4078 "generated!\n",
4079 __FILE__, __LINE__, __func__,
4080 (int)wReplacedNode);
4081 MARK_BLOCK_AS_BAD(wReplacedNode);
4082 } else {
4083 pbt[wBlockNum] = wReplacedNode;
4084 pbt[wLeastReadIndex] = wTempNode;
4088 if ((wResult == PASS) && (g_cBlockTableStatus !=
4089 IN_PROGRESS_BLOCK_TABLE)) {
4090 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
4091 FTL_Write_IN_Progress_Block_Table_Page();
4093 #endif
4095 } while (wResult != PASS)
4098 #if CMD_DMA
4099 /* ... */
4100 #endif
4102 return wResult;