/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
21 #include <linux/slab.h>
/*
 * Address decomposition and block-table state-flag helpers.
 *
 * BLK_FROM_ADDR()/PAGE_FROM_ADDR() split a 64-bit byte address into a
 * logical block number and the page index inside that block, using the
 * device geometry published in the global DeviceInfo structure.
 *
 * The IS_*_BLOCK() predicates test the state bits kept in the high bits
 * of a block-table entry.  NOTE: they deliberately expand the identifier
 * `pbt`, so they may only be used where a local `u32 *pbt` pointer to
 * the block table is in scope.
 *
 * Fix: macro parameters are now parenthesized in the expansions so that
 * passing an expression argument (e.g. `base + off`) cannot be broken
 * by operator precedence.
 */
#define BLK_FROM_ADDR(addr)  ((u32)((addr) >> DeviceInfo.nBitsInBlockDataSize))
#define PAGE_FROM_ADDR(addr, Block)  ((u16)(((addr) - (u64)(Block) * \
	DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))

#define IS_SPARE_BLOCK(blk)	(BAD_BLOCK != (pbt[blk] & BAD_BLOCK) && \
	SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))

#define IS_DATA_BLOCK(blk)	(0 == (pbt[blk] & BAD_BLOCK))

#define IS_DISCARDED_BLOCK(blk)	(BAD_BLOCK != (pbt[blk] & BAD_BLOCK) && \
	DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))

#define IS_BAD_BLOCK(blk)	(BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
46 void debug_boundary_lineno_error(int chnl
, int limit
, int no
,
47 int lineno
, char *filename
)
50 printk(KERN_ERR
"Boundary Check Fail value %d >= limit %d, "
51 "at %s:%d. Other info:%d. Aborting...\n",
52 chnl
, limit
, filename
, lineno
, no
);
54 /* static int globalmemsize; */
57 static u16
FTL_Cache_If_Hit(u64 dwPageAddr
);
58 static int FTL_Cache_Read(u64 dwPageAddr
);
59 static void FTL_Cache_Read_Page(u8
*pData
, u64 dwPageAddr
,
61 static void FTL_Cache_Write_Page(u8
*pData
, u64 dwPageAddr
,
62 u8 cache_blk
, u16 flag
);
63 static int FTL_Cache_Write(void);
64 static void FTL_Calculate_LRU(void);
65 static u32
FTL_Get_Block_Index(u32 wBlockNum
);
67 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block
,
68 u8 BT_Tag
, u16
*Page
);
69 static int FTL_Read_Block_Table(void);
70 static int FTL_Write_Block_Table(int wForce
);
71 static int FTL_Write_Block_Table_Data(void);
72 static int FTL_Check_Block_Table(int wOldTable
);
73 static int FTL_Static_Wear_Leveling(void);
74 static u32
FTL_Replace_Block_Table(void);
75 static int FTL_Write_IN_Progress_Block_Table_Page(void);
77 static u32
FTL_Get_Page_Num(u64 length
);
78 static u64
FTL_Get_Physical_Block_Addr(u64 blk_addr
);
80 static u32
FTL_Replace_OneBlock(u32 wBlockNum
,
82 static u32
FTL_Replace_LWBlock(u32 wBlockNum
,
83 int *pGarbageCollect
);
84 static u32
FTL_Replace_MWBlock(void);
85 static int FTL_Replace_Block(u64 blk_addr
);
86 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX
);
88 struct device_info_tag DeviceInfo
;
89 struct flash_cache_tag Cache
;
90 static struct spectra_l2_cache_info cache_l2
;
92 static u8
*cache_l2_page_buf
;
93 static u8
*cache_l2_blk_buf
;
99 static u16 g_wBlockTableOffset
;
100 static u32 g_wBlockTableIndex
;
101 static u8 g_cBlockTableStatus
;
103 static u8
*g_pTempBuf
;
104 static u8
*flag_check_blk_table
;
105 static u8
*tmp_buf_search_bt_in_block
;
106 static u8
*spare_buf_search_bt_in_block
;
107 static u8
*spare_buf_bt_search_bt_in_block
;
108 static u8
*tmp_buf1_read_blk_table
;
109 static u8
*tmp_buf2_read_blk_table
;
110 static u8
*flags_static_wear_leveling
;
111 static u8
*tmp_buf_write_blk_table_data
;
112 static u8
*tmp_buf_read_disturbance
;
114 u8
*buf_read_page_main_spare
;
115 u8
*buf_write_page_main_spare
;
116 u8
*buf_read_page_spare
;
117 u8
*buf_get_bad_block
;
119 #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
120 struct flash_cache_delta_list_tag int_cache
[MAX_CHANS
+ MAX_DESCS
];
121 struct flash_cache_tag cache_start_copy
;
124 int g_wNumFreeBlocks
;
128 static u8 bt_flag
= FIRST_BT_ID
;
129 static u8 bt_block_changed
;
131 static u16 cache_block_to_write
;
132 static u8 last_erased
= FIRST_BT_ID
;
135 static u8 BT_GC_Called
;
138 #define COPY_BACK_BUF_NUM 10
140 static u8 ftl_cmd_cnt
; /* Init value is 0 */
143 u8
*g_pBTStartingCopy
;
144 u8
*g_pWearCounterCopy
;
145 u16
*g_pReadCounterCopy
;
146 u8
*g_pBlockTableCopies
;
147 u8
*g_pNextBlockTable
;
148 static u8
*cp_back_buf_copies
[COPY_BACK_BUF_NUM
];
149 static int cp_back_buf_idx
;
151 static u8
*g_temp_buf
;
153 #pragma pack(push, 1)
155 struct BTableChangesDelta
{
158 u16 g_wBlockTableOffset
;
159 u32 g_wBlockTableIndex
;
170 struct BTableChangesDelta
*p_BTableChangesDelta
;
/*
 * Flag-mutation helpers for block-table entries.  The argument is the
 * entry itself (an lvalue), which is modified in place.
 *
 * Fix: the macro parameters are parenthesized so that an lvalue
 * expression argument cannot be mis-parsed by operator precedence.
 */
#define MARK_BLOCK_AS_BAD(blocknode)	((blocknode) |= BAD_BLOCK)
#define MARK_BLK_AS_DISCARD(blk)	((blk) = ((blk) & ~SPARE_BLOCK) | DISCARD_BLOCK)
177 #define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
179 #define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
181 #define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
183 #if SUPPORT_LARGE_BLOCKNUM
184 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
187 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
/* On flash, the wear/read counter tables occupy exactly their RAM size. */
#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
	FTL_Get_WearCounter_Table_Mem_Size_Bytes
#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
	FTL_Get_ReadCounter_Table_Mem_Size_Bytes
195 static u32
FTL_Get_Block_Table_Flash_Size_Bytes(void)
199 if (DeviceInfo
.MLCDevice
) {
200 byte_num
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
201 DeviceInfo
.wDataBlockNum
* sizeof(u8
) +
202 DeviceInfo
.wDataBlockNum
* sizeof(u16
);
204 byte_num
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
205 DeviceInfo
.wDataBlockNum
* sizeof(u8
);
208 byte_num
+= 4 * sizeof(u8
);
213 static u16
FTL_Get_Block_Table_Flash_Size_Pages(void)
215 return (u16
)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
218 static int FTL_Copy_Block_Table_To_Flash(u8
*flashBuf
, u32 sizeToTx
,
221 u32 wBytesCopied
, blk_tbl_size
, wBytes
;
222 u32
*pbt
= (u32
*)g_pBlockTable
;
224 blk_tbl_size
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
226 (wBytes
< sizeToTx
) && ((wBytes
+ sizeTxed
) < blk_tbl_size
);
228 #if SUPPORT_LARGE_BLOCKNUM
229 flashBuf
[wBytes
] = (u8
)(pbt
[(wBytes
+ sizeTxed
) / 3]
230 >> (((wBytes
+ sizeTxed
) % 3) ?
231 ((((wBytes
+ sizeTxed
) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
233 flashBuf
[wBytes
] = (u8
)(pbt
[(wBytes
+ sizeTxed
) / 2]
234 >> (((wBytes
+ sizeTxed
) % 2) ? 0 : 8)) & 0xFF;
238 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
239 blk_tbl_size
= FTL_Get_WearCounter_Table_Flash_Size_Bytes();
240 wBytesCopied
= wBytes
;
241 wBytes
= ((blk_tbl_size
- sizeTxed
) > (sizeToTx
- wBytesCopied
)) ?
242 (sizeToTx
- wBytesCopied
) : (blk_tbl_size
- sizeTxed
);
243 memcpy(flashBuf
+ wBytesCopied
, g_pWearCounter
+ sizeTxed
, wBytes
);
245 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
247 if (DeviceInfo
.MLCDevice
) {
248 blk_tbl_size
= FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
249 wBytesCopied
+= wBytes
;
250 for (wBytes
= 0; ((wBytes
+ wBytesCopied
) < sizeToTx
) &&
251 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++)
252 flashBuf
[wBytes
+ wBytesCopied
] =
253 (g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] >>
254 (((wBytes
+ sizeTxed
) % 2) ? 0 : 8)) & 0xFF;
257 return wBytesCopied
+ wBytes
;
260 static int FTL_Copy_Block_Table_From_Flash(u8
*flashBuf
,
261 u32 sizeToTx
, u32 sizeTxed
)
263 u32 wBytesCopied
, blk_tbl_size
, wBytes
;
264 u32
*pbt
= (u32
*)g_pBlockTable
;
266 blk_tbl_size
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
267 for (wBytes
= 0; (wBytes
< sizeToTx
) &&
268 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++) {
269 #if SUPPORT_LARGE_BLOCKNUM
270 if (!((wBytes
+ sizeTxed
) % 3))
271 pbt
[(wBytes
+ sizeTxed
) / 3] = 0;
272 pbt
[(wBytes
+ sizeTxed
) / 3] |=
273 (flashBuf
[wBytes
] << (((wBytes
+ sizeTxed
) % 3) ?
274 ((((wBytes
+ sizeTxed
) % 3) == 2) ? 0 : 8) : 16));
276 if (!((wBytes
+ sizeTxed
) % 2))
277 pbt
[(wBytes
+ sizeTxed
) / 2] = 0;
278 pbt
[(wBytes
+ sizeTxed
) / 2] |=
279 (flashBuf
[wBytes
] << (((wBytes
+ sizeTxed
) % 2) ?
284 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
285 blk_tbl_size
= FTL_Get_WearCounter_Table_Flash_Size_Bytes();
286 wBytesCopied
= wBytes
;
287 wBytes
= ((blk_tbl_size
- sizeTxed
) > (sizeToTx
- wBytesCopied
)) ?
288 (sizeToTx
- wBytesCopied
) : (blk_tbl_size
- sizeTxed
);
289 memcpy(g_pWearCounter
+ sizeTxed
, flashBuf
+ wBytesCopied
, wBytes
);
290 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
292 if (DeviceInfo
.MLCDevice
) {
293 wBytesCopied
+= wBytes
;
294 blk_tbl_size
= FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
295 for (wBytes
= 0; ((wBytes
+ wBytesCopied
) < sizeToTx
) &&
296 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++) {
297 if (((wBytes
+ sizeTxed
) % 2))
298 g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] = 0;
299 g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] |=
301 (((wBytes
+ sizeTxed
) % 2) ? 0 : 8));
305 return wBytesCopied
+wBytes
;
308 static int FTL_Insert_Block_Table_Signature(u8
*buf
, u8 tag
)
312 for (i
= 0; i
< BTSIG_BYTES
; i
++)
313 buf
[BTSIG_OFFSET
+ i
] =
314 ((tag
+ (i
* BTSIG_DELTA
) - FIRST_BT_ID
) %
315 (1 + LAST_BT_ID
-FIRST_BT_ID
)) + FIRST_BT_ID
;
320 static int FTL_Extract_Block_Table_Tag(u8
*buf
, u8
**tagarray
)
322 static u8 tag
[BTSIG_BYTES
>> 1];
323 int i
, j
, k
, tagi
, tagtemp
, status
;
325 *tagarray
= (u8
*)tag
;
328 for (i
= 0; i
< (BTSIG_BYTES
- 1); i
++) {
329 for (j
= i
+ 1; (j
< BTSIG_BYTES
) &&
330 (tagi
< (BTSIG_BYTES
>> 1)); j
++) {
331 tagtemp
= buf
[BTSIG_OFFSET
+ j
] -
332 buf
[BTSIG_OFFSET
+ i
];
333 if (tagtemp
&& !(tagtemp
% BTSIG_DELTA
)) {
334 tagtemp
= (buf
[BTSIG_OFFSET
+ i
] +
335 (1 + LAST_BT_ID
- FIRST_BT_ID
) -
337 (1 + LAST_BT_ID
- FIRST_BT_ID
);
339 for (k
= 0; k
< tagi
; k
++) {
340 if (tagtemp
== tag
[k
])
344 if (status
== FAIL
) {
345 tag
[tagi
++] = tagtemp
;
346 i
= (j
== (i
+ 1)) ? i
+ 1 : i
;
347 j
= (j
== (i
+ 1)) ? i
+ 1 : i
;
357 static int FTL_Execute_SPL_Recovery(void)
360 u32
*pbt
= (u32
*)g_pBlockTable
;
363 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
364 __FILE__
, __LINE__
, __func__
);
366 blks
= DeviceInfo
.wSpectraEndBlock
- DeviceInfo
.wSpectraStartBlock
;
367 for (j
= 0; j
<= blks
; j
++) {
369 if (((block
& BAD_BLOCK
) != BAD_BLOCK
) &&
370 ((block
& SPARE_BLOCK
) == SPARE_BLOCK
)) {
371 ret
= GLOB_LLD_Erase_Block(block
& ~BAD_BLOCK
);
373 nand_dbg_print(NAND_DBG_WARN
,
374 "NAND Program fail in %s, Line %d, "
375 "Function: %s, new Bad Block %d "
377 __FILE__
, __LINE__
, __func__
,
378 (int)(block
& ~BAD_BLOCK
));
379 MARK_BLOCK_AS_BAD(pbt
[j
]);
387 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
388 * Function: GLOB_FTL_IdentifyDevice
389 * Inputs: pointer to identify data structure
390 * Outputs: PASS / FAIL
391 * Description: the identify data structure is filled in with
392 * information for the block driver.
393 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
394 int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag
*dev_data
)
396 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
397 __FILE__
, __LINE__
, __func__
);
399 dev_data
->NumBlocks
= DeviceInfo
.wTotalBlocks
;
400 dev_data
->PagesPerBlock
= DeviceInfo
.wPagesPerBlock
;
401 dev_data
->PageDataSize
= DeviceInfo
.wPageDataSize
;
402 dev_data
->wECCBytesPerSector
= DeviceInfo
.wECCBytesPerSector
;
403 dev_data
->wDataBlockNum
= DeviceInfo
.wDataBlockNum
;
409 static int allocate_memory(void)
411 u32 block_table_size
, page_size
, block_size
, mem_size
;
418 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
419 __FILE__
, __LINE__
, __func__
);
421 page_size
= DeviceInfo
.wPageSize
;
422 block_size
= DeviceInfo
.wPagesPerBlock
* DeviceInfo
.wPageDataSize
;
424 block_table_size
= DeviceInfo
.wDataBlockNum
*
425 (sizeof(u32
) + sizeof(u8
) + sizeof(u16
));
426 block_table_size
+= (DeviceInfo
.wPageDataSize
-
427 (block_table_size
% DeviceInfo
.wPageDataSize
)) %
428 DeviceInfo
.wPageDataSize
;
430 /* Malloc memory for block tables */
431 g_pBlockTable
= kmalloc(block_table_size
, GFP_ATOMIC
);
433 goto block_table_fail
;
434 memset(g_pBlockTable
, 0, block_table_size
);
435 total_bytes
+= block_table_size
;
437 g_pWearCounter
= (u8
*)(g_pBlockTable
+
438 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
440 if (DeviceInfo
.MLCDevice
)
441 g_pReadCounter
= (u16
*)(g_pBlockTable
+
442 DeviceInfo
.wDataBlockNum
*
443 (sizeof(u32
) + sizeof(u8
)));
445 /* Malloc memory and init for cache items */
446 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
447 Cache
.array
[i
].address
= NAND_CACHE_INIT_ADDR
;
448 Cache
.array
[i
].use_cnt
= 0;
449 Cache
.array
[i
].changed
= CLEAR
;
450 Cache
.array
[i
].buf
= kmalloc(Cache
.cache_item_size
,
452 if (!Cache
.array
[i
].buf
)
453 goto cache_item_fail
;
454 memset(Cache
.array
[i
].buf
, 0, Cache
.cache_item_size
);
455 total_bytes
+= Cache
.cache_item_size
;
458 /* Malloc memory for IPF */
459 g_pIPF
= kmalloc(page_size
, GFP_ATOMIC
);
462 memset(g_pIPF
, 0, page_size
);
463 total_bytes
+= page_size
;
465 /* Malloc memory for data merging during Level2 Cache flush */
466 cache_l2_page_buf
= kmalloc(page_size
, GFP_ATOMIC
);
467 if (!cache_l2_page_buf
)
468 goto cache_l2_page_buf_fail
;
469 memset(cache_l2_page_buf
, 0xff, page_size
);
470 total_bytes
+= page_size
;
472 cache_l2_blk_buf
= kmalloc(block_size
, GFP_ATOMIC
);
473 if (!cache_l2_blk_buf
)
474 goto cache_l2_blk_buf_fail
;
475 memset(cache_l2_blk_buf
, 0xff, block_size
);
476 total_bytes
+= block_size
;
478 /* Malloc memory for temp buffer */
479 g_pTempBuf
= kmalloc(Cache
.cache_item_size
, GFP_ATOMIC
);
482 memset(g_pTempBuf
, 0, Cache
.cache_item_size
);
483 total_bytes
+= Cache
.cache_item_size
;
485 /* Malloc memory for block table blocks */
486 mem_size
= (1 + LAST_BT_ID
- FIRST_BT_ID
) * sizeof(u32
);
487 g_pBTBlocks
= kmalloc(mem_size
, GFP_ATOMIC
);
490 memset(g_pBTBlocks
, 0xff, mem_size
);
491 total_bytes
+= mem_size
;
493 /* Malloc memory for function FTL_Check_Block_Table */
494 flag_check_blk_table
= kmalloc(DeviceInfo
.wDataBlockNum
, GFP_ATOMIC
);
495 if (!flag_check_blk_table
)
496 goto flag_check_blk_table_fail
;
497 total_bytes
+= DeviceInfo
.wDataBlockNum
;
499 /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
500 tmp_buf_search_bt_in_block
= kmalloc(page_size
, GFP_ATOMIC
);
501 if (!tmp_buf_search_bt_in_block
)
502 goto tmp_buf_search_bt_in_block_fail
;
503 memset(tmp_buf_search_bt_in_block
, 0xff, page_size
);
504 total_bytes
+= page_size
;
506 mem_size
= DeviceInfo
.wPageSize
- DeviceInfo
.wPageDataSize
;
507 spare_buf_search_bt_in_block
= kmalloc(mem_size
, GFP_ATOMIC
);
508 if (!spare_buf_search_bt_in_block
)
509 goto spare_buf_search_bt_in_block_fail
;
510 memset(spare_buf_search_bt_in_block
, 0xff, mem_size
);
511 total_bytes
+= mem_size
;
513 spare_buf_bt_search_bt_in_block
= kmalloc(mem_size
, GFP_ATOMIC
);
514 if (!spare_buf_bt_search_bt_in_block
)
515 goto spare_buf_bt_search_bt_in_block_fail
;
516 memset(spare_buf_bt_search_bt_in_block
, 0xff, mem_size
);
517 total_bytes
+= mem_size
;
519 /* Malloc memory for function FTL_Read_Block_Table */
520 tmp_buf1_read_blk_table
= kmalloc(page_size
, GFP_ATOMIC
);
521 if (!tmp_buf1_read_blk_table
)
522 goto tmp_buf1_read_blk_table_fail
;
523 memset(tmp_buf1_read_blk_table
, 0xff, page_size
);
524 total_bytes
+= page_size
;
526 tmp_buf2_read_blk_table
= kmalloc(page_size
, GFP_ATOMIC
);
527 if (!tmp_buf2_read_blk_table
)
528 goto tmp_buf2_read_blk_table_fail
;
529 memset(tmp_buf2_read_blk_table
, 0xff, page_size
);
530 total_bytes
+= page_size
;
532 /* Malloc memory for function FTL_Static_Wear_Leveling */
533 flags_static_wear_leveling
= kmalloc(DeviceInfo
.wDataBlockNum
,
535 if (!flags_static_wear_leveling
)
536 goto flags_static_wear_leveling_fail
;
537 total_bytes
+= DeviceInfo
.wDataBlockNum
;
539 /* Malloc memory for function FTL_Write_Block_Table_Data */
540 if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
541 mem_size
= FTL_Get_Block_Table_Flash_Size_Bytes() -
542 2 * DeviceInfo
.wPageSize
;
544 mem_size
= DeviceInfo
.wPageSize
;
545 tmp_buf_write_blk_table_data
= kmalloc(mem_size
, GFP_ATOMIC
);
546 if (!tmp_buf_write_blk_table_data
)
547 goto tmp_buf_write_blk_table_data_fail
;
548 memset(tmp_buf_write_blk_table_data
, 0xff, mem_size
);
549 total_bytes
+= mem_size
;
551 /* Malloc memory for function FTL_Read_Disturbance */
552 tmp_buf_read_disturbance
= kmalloc(block_size
, GFP_ATOMIC
);
553 if (!tmp_buf_read_disturbance
)
554 goto tmp_buf_read_disturbance_fail
;
555 memset(tmp_buf_read_disturbance
, 0xff, block_size
);
556 total_bytes
+= block_size
;
558 /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
559 buf_read_page_main_spare
= kmalloc(DeviceInfo
.wPageSize
, GFP_ATOMIC
);
560 if (!buf_read_page_main_spare
)
561 goto buf_read_page_main_spare_fail
;
562 total_bytes
+= DeviceInfo
.wPageSize
;
564 /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
565 buf_write_page_main_spare
= kmalloc(DeviceInfo
.wPageSize
, GFP_ATOMIC
);
566 if (!buf_write_page_main_spare
)
567 goto buf_write_page_main_spare_fail
;
568 total_bytes
+= DeviceInfo
.wPageSize
;
570 /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
571 buf_read_page_spare
= kmalloc(DeviceInfo
.wPageSpareSize
, GFP_ATOMIC
);
572 if (!buf_read_page_spare
)
573 goto buf_read_page_spare_fail
;
574 memset(buf_read_page_spare
, 0xff, DeviceInfo
.wPageSpareSize
);
575 total_bytes
+= DeviceInfo
.wPageSpareSize
;
577 /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
578 buf_get_bad_block
= kmalloc(DeviceInfo
.wPageSpareSize
, GFP_ATOMIC
);
579 if (!buf_get_bad_block
)
580 goto buf_get_bad_block_fail
;
581 memset(buf_get_bad_block
, 0xff, DeviceInfo
.wPageSpareSize
);
582 total_bytes
+= DeviceInfo
.wPageSpareSize
;
585 g_temp_buf
= kmalloc(block_size
, GFP_ATOMIC
);
588 memset(g_temp_buf
, 0xff, block_size
);
589 total_bytes
+= block_size
;
591 /* Malloc memory for copy of block table used in CDMA mode */
592 g_pBTStartingCopy
= kmalloc(block_table_size
, GFP_ATOMIC
);
593 if (!g_pBTStartingCopy
)
594 goto bt_starting_copy
;
595 memset(g_pBTStartingCopy
, 0, block_table_size
);
596 total_bytes
+= block_table_size
;
598 g_pWearCounterCopy
= (u8
*)(g_pBTStartingCopy
+
599 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
601 if (DeviceInfo
.MLCDevice
)
602 g_pReadCounterCopy
= (u16
*)(g_pBTStartingCopy
+
603 DeviceInfo
.wDataBlockNum
*
604 (sizeof(u32
) + sizeof(u8
)));
606 /* Malloc memory for block table copies */
607 mem_size
= 5 * DeviceInfo
.wDataBlockNum
* sizeof(u32
) +
608 5 * DeviceInfo
.wDataBlockNum
* sizeof(u8
);
609 if (DeviceInfo
.MLCDevice
)
610 mem_size
+= 5 * DeviceInfo
.wDataBlockNum
* sizeof(u16
);
611 g_pBlockTableCopies
= kmalloc(mem_size
, GFP_ATOMIC
);
612 if (!g_pBlockTableCopies
)
613 goto blk_table_copies_fail
;
614 memset(g_pBlockTableCopies
, 0, mem_size
);
615 total_bytes
+= mem_size
;
616 g_pNextBlockTable
= g_pBlockTableCopies
;
618 /* Malloc memory for Block Table Delta */
619 mem_size
= MAX_DESCS
* sizeof(struct BTableChangesDelta
);
620 g_pBTDelta
= kmalloc(mem_size
, GFP_ATOMIC
);
623 memset(g_pBTDelta
, 0, mem_size
);
624 total_bytes
+= mem_size
;
625 g_pBTDelta_Free
= g_pBTDelta
;
627 /* Malloc memory for Copy Back Buffers */
628 for (j
= 0; j
< COPY_BACK_BUF_NUM
; j
++) {
629 cp_back_buf_copies
[j
] = kmalloc(block_size
, GFP_ATOMIC
);
630 if (!cp_back_buf_copies
[j
])
631 goto cp_back_buf_copies_fail
;
632 memset(cp_back_buf_copies
[j
], 0, block_size
);
633 total_bytes
+= block_size
;
637 /* Malloc memory for pending commands list */
638 mem_size
= sizeof(struct pending_cmd
) * MAX_DESCS
;
639 info
.pcmds
= kzalloc(mem_size
, GFP_KERNEL
);
641 goto pending_cmds_buf_fail
;
642 total_bytes
+= mem_size
;
644 /* Malloc memory for CDMA descripter table */
645 mem_size
= sizeof(struct cdma_descriptor
) * MAX_DESCS
;
646 info
.cdma_desc_buf
= kzalloc(mem_size
, GFP_KERNEL
);
647 if (!info
.cdma_desc_buf
)
648 goto cdma_desc_buf_fail
;
649 total_bytes
+= mem_size
;
651 /* Malloc memory for Memcpy descripter table */
652 mem_size
= sizeof(struct memcpy_descriptor
) * MAX_DESCS
;
653 info
.memcp_desc_buf
= kzalloc(mem_size
, GFP_KERNEL
);
654 if (!info
.memcp_desc_buf
)
655 goto memcp_desc_buf_fail
;
656 total_bytes
+= mem_size
;
659 nand_dbg_print(NAND_DBG_WARN
,
660 "Total memory allocated in FTL layer: %d\n", total_bytes
);
666 kfree(info
.cdma_desc_buf
);
669 pending_cmds_buf_fail
:
670 cp_back_buf_copies_fail
:
673 kfree(cp_back_buf_copies
[j
]);
676 kfree(g_pBlockTableCopies
);
677 blk_table_copies_fail
:
678 kfree(g_pBTStartingCopy
);
682 kfree(buf_get_bad_block
);
685 buf_get_bad_block_fail
:
686 kfree(buf_read_page_spare
);
687 buf_read_page_spare_fail
:
688 kfree(buf_write_page_main_spare
);
689 buf_write_page_main_spare_fail
:
690 kfree(buf_read_page_main_spare
);
691 buf_read_page_main_spare_fail
:
692 kfree(tmp_buf_read_disturbance
);
693 tmp_buf_read_disturbance_fail
:
694 kfree(tmp_buf_write_blk_table_data
);
695 tmp_buf_write_blk_table_data_fail
:
696 kfree(flags_static_wear_leveling
);
697 flags_static_wear_leveling_fail
:
698 kfree(tmp_buf2_read_blk_table
);
699 tmp_buf2_read_blk_table_fail
:
700 kfree(tmp_buf1_read_blk_table
);
701 tmp_buf1_read_blk_table_fail
:
702 kfree(spare_buf_bt_search_bt_in_block
);
703 spare_buf_bt_search_bt_in_block_fail
:
704 kfree(spare_buf_search_bt_in_block
);
705 spare_buf_search_bt_in_block_fail
:
706 kfree(tmp_buf_search_bt_in_block
);
707 tmp_buf_search_bt_in_block_fail
:
708 kfree(flag_check_blk_table
);
709 flag_check_blk_table_fail
:
714 kfree(cache_l2_blk_buf
);
715 cache_l2_blk_buf_fail
:
716 kfree(cache_l2_page_buf
);
717 cache_l2_page_buf_fail
:
723 kfree(Cache
.array
[i
].buf
);
724 kfree(g_pBlockTable
);
726 printk(KERN_ERR
"Failed to kmalloc memory in %s Line %d.\n",
733 static int free_memory(void)
738 kfree(info
.memcp_desc_buf
);
739 kfree(info
.cdma_desc_buf
);
741 for (i
= COPY_BACK_BUF_NUM
- 1; i
>= 0; i
--)
742 kfree(cp_back_buf_copies
[i
]);
744 kfree(g_pBlockTableCopies
);
745 kfree(g_pBTStartingCopy
);
747 kfree(buf_get_bad_block
);
749 kfree(buf_read_page_spare
);
750 kfree(buf_write_page_main_spare
);
751 kfree(buf_read_page_main_spare
);
752 kfree(tmp_buf_read_disturbance
);
753 kfree(tmp_buf_write_blk_table_data
);
754 kfree(flags_static_wear_leveling
);
755 kfree(tmp_buf2_read_blk_table
);
756 kfree(tmp_buf1_read_blk_table
);
757 kfree(spare_buf_bt_search_bt_in_block
);
758 kfree(spare_buf_search_bt_in_block
);
759 kfree(tmp_buf_search_bt_in_block
);
760 kfree(flag_check_blk_table
);
764 for (i
= CACHE_ITEM_NUM
- 1; i
>= 0; i
--)
765 kfree(Cache
.array
[i
].buf
);
766 kfree(g_pBlockTable
);
771 static void dump_cache_l2_table(void)
774 struct spectra_l2_cache_list
*pnd
;
778 list_for_each(p
, &cache_l2
.table
.list
) {
779 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
780 nand_dbg_print(NAND_DBG_WARN
, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n
, pnd
->logical_blk_num
);
782 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
783 if (pnd->pages_array[i] != MAX_U32_VALUE)
784 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
791 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
792 * Function: GLOB_FTL_Init
794 * Outputs: PASS=0 / FAIL=1
795 * Description: allocates the memory for cache array,
796 * important data structures
797 * clears the cache array
798 * reads the block table from flash into array
799 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
800 int GLOB_FTL_Init(void)
804 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
805 __FILE__
, __LINE__
, __func__
);
807 Cache
.pages_per_item
= 1;
808 Cache
.cache_item_size
= 1 * DeviceInfo
.wPageDataSize
;
810 if (allocate_memory() != PASS
)
814 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
815 memcpy((void *)&cache_start_copy
, (void *)&Cache
,
816 sizeof(struct flash_cache_tag
));
817 memset((void *)&int_cache
, -1,
818 sizeof(struct flash_cache_delta_list_tag
) *
819 (MAX_CHANS
+ MAX_DESCS
));
824 if (FTL_Read_Block_Table() != PASS
)
827 /* Init the Level2 Cache data structure */
828 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++)
829 cache_l2
.blk_array
[i
] = MAX_U32_VALUE
;
830 cache_l2
.cur_blk_idx
= 0;
831 cache_l2
.cur_page_num
= 0;
832 INIT_LIST_HEAD(&cache_l2
.table
.list
);
833 cache_l2
.table
.logical_blk_num
= MAX_U32_VALUE
;
835 dump_cache_l2_table();
843 static void process_cmd(int *first_failed_cmd
, u16 idx
, int event
)
845 printk(KERN_ERR
"temporary workaround function. "
846 "Should not be called! \n");
849 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
850 * Function: GLOB_FTL_Event_Status
852 * Outputs: Event Code
853 * Description: It is called by SBD after hardware interrupt signalling
854 * completion of commands chain
855 * It does following things
856 * get event status from LLD
857 * analyze command chain status
858 * determine last command executed
860 * rebuild the block table in case of uncorrectable error
862 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
863 int GLOB_FTL_Event_Status(int *first_failed_cmd
)
865 int event_code
= PASS
;
868 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
869 __FILE__
, __LINE__
, __func__
);
871 *first_failed_cmd
= 0;
873 event_code
= GLOB_LLD_Event_Status();
875 switch (event_code
) {
877 nand_dbg_print(NAND_DBG_DEBUG
, "Handling EVENT_PASS\n");
879 case EVENT_UNCORRECTABLE_DATA_ERROR
:
880 nand_dbg_print(NAND_DBG_DEBUG
, "Handling Uncorrectable ECC!\n");
882 case EVENT_PROGRAM_FAILURE
:
883 case EVENT_ERASE_FAILURE
:
884 nand_dbg_print(NAND_DBG_WARN
, "Handling Ugly case. "
885 "Event code: 0x%x\n", event_code
);
886 p_BTableChangesDelta
=
887 (struct BTableChangesDelta
*)g_pBTDelta
;
888 for (i_P
= MAX_CHANS
; i_P
< (ftl_cmd_cnt
+ MAX_CHANS
);
890 process_cmd(first_failed_cmd
, i_P
, event_code
);
891 memcpy(g_pBlockTable
, g_pBTStartingCopy
,
892 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
893 memcpy(g_pWearCounter
, g_pWearCounterCopy
,
894 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
895 if (DeviceInfo
.MLCDevice
)
896 memcpy(g_pReadCounter
, g_pReadCounterCopy
,
897 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
899 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
900 memcpy((void *)&Cache
, (void *)&cache_start_copy
,
901 sizeof(struct flash_cache_tag
));
902 memset((void *)&int_cache
, -1,
903 sizeof(struct flash_cache_delta_list_tag
) *
904 (MAX_DESCS
+ MAX_CHANS
));
908 nand_dbg_print(NAND_DBG_WARN
,
909 "Handling unexpected event code - 0x%x\n",
915 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
916 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
917 memcpy(g_pWearCounterCopy
, g_pWearCounter
,
918 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
919 if (DeviceInfo
.MLCDevice
)
920 memcpy(g_pReadCounterCopy
, g_pReadCounter
,
921 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
923 g_pBTDelta_Free
= g_pBTDelta
;
925 g_pNextBlockTable
= g_pBlockTableCopies
;
928 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
929 memcpy((void *)&cache_start_copy
, (void *)&Cache
,
930 sizeof(struct flash_cache_tag
));
931 memset((void *)&int_cache
, -1,
932 sizeof(struct flash_cache_delta_list_tag
) *
933 (MAX_DESCS
+ MAX_CHANS
));
939 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
940 * Function: glob_ftl_execute_cmds
943 * Description: pass thru to LLD
944 ***************************************************************/
945 u16
glob_ftl_execute_cmds(void)
947 nand_dbg_print(NAND_DBG_TRACE
,
948 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
949 (unsigned int)ftl_cmd_cnt
);
951 return glob_lld_execute_cmds();
957 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
958 * Function: GLOB_FTL_Read Immediate
959 * Inputs: pointer to data
961 * Outputs: PASS / FAIL
962 * Description: Reads one page of data into RAM directly from flash without
963 * using or disturbing cache.It is assumed this function is called
964 * with CMD-DMA disabled.
965 *****************************************************************/
966 int GLOB_FTL_Read_Immediate(u8
*read_data
, u64 addr
)
972 u32
*pbt
= (u32
*)g_pBlockTable
;
974 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
975 __FILE__
, __LINE__
, __func__
);
977 Block
= BLK_FROM_ADDR(addr
);
978 Page
= PAGE_FROM_ADDR(addr
, Block
);
980 if (!IS_SPARE_BLOCK(Block
))
983 phy_blk
= pbt
[Block
];
984 wResult
= GLOB_LLD_Read_Page_Main(read_data
, phy_blk
, Page
, 1);
986 if (DeviceInfo
.MLCDevice
) {
987 g_pReadCounter
[phy_blk
- DeviceInfo
.wSpectraStartBlock
]++;
988 if (g_pReadCounter
[phy_blk
- DeviceInfo
.wSpectraStartBlock
]
990 FTL_Read_Disturbance(phy_blk
);
991 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
992 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
993 FTL_Write_IN_Progress_Block_Table_Page();
1001 #ifdef SUPPORT_BIG_ENDIAN
1002 /*********************************************************************
1003 * Function: FTL_Invert_Block_Table
1006 * Description: Re-format the block table in ram based on BIG_ENDIAN and
1007 * LARGE_BLOCKNUM if necessary
1008 **********************************************************************/
1009 static void FTL_Invert_Block_Table(void)
1012 u32
*pbt
= (u32
*)g_pBlockTable
;
1014 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1015 __FILE__
, __LINE__
, __func__
);
1017 #ifdef SUPPORT_LARGE_BLOCKNUM
1018 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1019 pbt
[i
] = INVERTUINT32(pbt
[i
]);
1020 g_pWearCounter
[i
] = INVERTUINT32(g_pWearCounter
[i
]);
1023 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1024 pbt
[i
] = INVERTUINT16(pbt
[i
]);
1025 g_pWearCounter
[i
] = INVERTUINT16(g_pWearCounter
[i
]);
1031 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1032 * Function: GLOB_FTL_Flash_Init
1034 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1035 * Description: The flash controller is initialized
1036 * The flash device is reset
1037 * Perform a flash READ ID command to confirm that a
1038 * valid device is attached and active.
1039 * The DeviceInfo structure gets filled in
1040 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1041 int GLOB_FTL_Flash_Init(void)
1045 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1046 __FILE__
, __LINE__
, __func__
);
1050 GLOB_LLD_Flash_Init();
1052 status
= GLOB_LLD_Read_Device_ID();
1057 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1059 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1060 * Description: The flash controller is released
1061 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1062 int GLOB_FTL_Flash_Release(void)
1064 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1065 __FILE__
, __LINE__
, __func__
);
1067 return GLOB_LLD_Flash_Release();
1071 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1072 * Function: GLOB_FTL_Cache_Release
1075 * Description: release all allocated memory in GLOB_FTL_Init
1076 * (allocated in GLOB_FTL_Init)
1077 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1078 void GLOB_FTL_Cache_Release(void)
1080 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1081 __FILE__
, __LINE__
, __func__
);
1086 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1087 * Function: FTL_Cache_If_Hit
1088 * Inputs: Page Address
1089 * Outputs: Block number/UNHIT BLOCK
1090 * Description: Determines if the addressed page is in cache
1091 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1092 static u16
FTL_Cache_If_Hit(u64 page_addr
)
1098 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1099 __FILE__
, __LINE__
, __func__
);
1101 item
= UNHIT_CACHE_ITEM
;
1102 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
1103 addr
= Cache
.array
[i
].address
;
1104 if ((page_addr
>= addr
) &&
1105 (page_addr
< (addr
+ Cache
.cache_item_size
))) {
1114 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1115 * Function: FTL_Calculate_LRU
1118 * Description: Calculate the least recently block in a cache and record its
1119 * index in LRU field.
1120 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1121 static void FTL_Calculate_LRU(void)
1123 u16 i
, bCurrentLRU
, bTempCount
;
1125 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1126 __FILE__
, __LINE__
, __func__
);
1129 bTempCount
= MAX_WORD_VALUE
;
1131 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
1132 if (Cache
.array
[i
].use_cnt
< bTempCount
) {
1134 bTempCount
= Cache
.array
[i
].use_cnt
;
1138 Cache
.LRU
= bCurrentLRU
;
1141 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1142 * Function: FTL_Cache_Read_Page
1143 * Inputs: pointer to read buffer, logical address and cache item number
1145 * Description: Read the page from the cached block addressed by blocknumber
1146 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1147 static void FTL_Cache_Read_Page(u8
*data_buf
, u64 logic_addr
, u16 cache_item
)
1151 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1152 __FILE__
, __LINE__
, __func__
);
1154 start_addr
= Cache
.array
[cache_item
].buf
;
1155 start_addr
+= (u32
)(((logic_addr
- Cache
.array
[cache_item
].address
) >>
1156 DeviceInfo
.nBitsInPageDataSize
) * DeviceInfo
.wPageDataSize
);
1159 GLOB_LLD_MemCopy_CMD(data_buf
, start_addr
,
1160 DeviceInfo
.wPageDataSize
, 0);
1163 memcpy(data_buf
, start_addr
, DeviceInfo
.wPageDataSize
);
1166 if (Cache
.array
[cache_item
].use_cnt
< MAX_WORD_VALUE
)
1167 Cache
.array
[cache_item
].use_cnt
++;
1170 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1171 * Function: FTL_Cache_Read_All
1172 * Inputs: pointer to read buffer,block address
1173 * Outputs: PASS=0 / FAIL =1
1174 * Description: It reads pages in cache
1175 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1176 static int FTL_Cache_Read_All(u8
*pData
, u64 phy_addr
)
1183 u32
*pbt
= (u32
*)g_pBlockTable
;
1186 Block
= BLK_FROM_ADDR(phy_addr
);
1187 Page
= PAGE_FROM_ADDR(phy_addr
, Block
);
1188 PageCount
= Cache
.pages_per_item
;
1190 nand_dbg_print(NAND_DBG_DEBUG
,
1191 "%s, Line %d, Function: %s, Block: 0x%x\n",
1192 __FILE__
, __LINE__
, __func__
, Block
);
1195 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1196 if ((pbt
[i
] & (~BAD_BLOCK
)) == Block
) {
1198 if (IS_SPARE_BLOCK(i
) || IS_BAD_BLOCK(i
) ||
1199 IS_DISCARDED_BLOCK(i
)) {
1200 /* Add by yunpeng -2008.12.3 */
1202 GLOB_LLD_MemCopy_CMD(pData
, g_temp_buf
,
1203 PageCount
* DeviceInfo
.wPageDataSize
, 0);
1207 PageCount
* DeviceInfo
.wPageDataSize
);
1211 continue; /* break ?? */
1216 if (0xffffffff == lba
)
1217 printk(KERN_ERR
"FTL_Cache_Read_All: Block is not found in BT\n");
1220 wResult
= GLOB_LLD_Read_Page_Main_cdma(pData
, Block
, Page
,
1221 PageCount
, LLD_CMD_FLAG_MODE_CDMA
);
1222 if (DeviceInfo
.MLCDevice
) {
1223 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
]++;
1224 nand_dbg_print(NAND_DBG_DEBUG
,
1225 "Read Counter modified in ftl_cmd_cnt %u"
1226 " Block %u Counter%u\n",
1227 ftl_cmd_cnt
, (unsigned int)Block
,
1228 g_pReadCounter
[Block
-
1229 DeviceInfo
.wSpectraStartBlock
]);
1231 p_BTableChangesDelta
=
1232 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
1233 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
1234 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
1235 p_BTableChangesDelta
->RC_Index
=
1236 Block
- DeviceInfo
.wSpectraStartBlock
;
1237 p_BTableChangesDelta
->RC_Entry_Value
=
1238 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
];
1239 p_BTableChangesDelta
->ValidFields
= 0xC0;
1243 if (g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
] >=
1245 FTL_Read_Disturbance(Block
);
1246 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
1247 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1248 FTL_Write_IN_Progress_Block_Table_Page();
1254 wResult
= GLOB_LLD_Read_Page_Main(pData
, Block
, Page
, PageCount
);
1255 if (wResult
== FAIL
)
1258 if (DeviceInfo
.MLCDevice
) {
1259 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
]++;
1260 if (g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
] >=
1262 FTL_Read_Disturbance(Block
);
1263 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
1264 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1265 FTL_Write_IN_Progress_Block_Table_Page();
1272 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1273 * Function: FTL_Cache_Write_All
1274 * Inputs: pointer to cache in sys memory
1275 * address of free block in flash
1276 * Outputs: PASS=0 / FAIL=1
1277 * Description: writes all the pages of the block in cache to flash
1279 * NOTE:need to make sure this works ok when cache is limited
1280 * to a partial block. This is where copy-back would be
1281 * activated. This would require knowing which pages in the
1282 * cached block are clean/dirty.Right now we only know if
1283 * the whole block is clean/dirty.
1284 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1285 static int FTL_Cache_Write_All(u8
*pData
, u64 blk_addr
)
1292 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1293 __FILE__
, __LINE__
, __func__
);
1295 nand_dbg_print(NAND_DBG_DEBUG
, "This block %d going to be written "
1296 "on %d\n", cache_block_to_write
,
1297 (u32
)(blk_addr
>> DeviceInfo
.nBitsInBlockDataSize
));
1299 Block
= BLK_FROM_ADDR(blk_addr
);
1300 Page
= PAGE_FROM_ADDR(blk_addr
, Block
);
1301 PageCount
= Cache
.pages_per_item
;
1304 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(pData
,
1305 Block
, Page
, PageCount
)) {
1306 nand_dbg_print(NAND_DBG_WARN
,
1307 "NAND Program fail in %s, Line %d, "
1308 "Function: %s, new Bad Block %d generated! "
1309 "Need Bad Block replacing.\n",
1310 __FILE__
, __LINE__
, __func__
, Block
);
1315 if (FAIL
== GLOB_LLD_Write_Page_Main(pData
, Block
, Page
, PageCount
)) {
1316 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in %s,"
1317 " Line %d, Function %s, new Bad Block %d generated!"
1318 "Need Bad Block replacing.\n",
1319 __FILE__
, __LINE__
, __func__
, Block
);
1326 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1327 * Function: FTL_Copy_Block
1328 * Inputs: source block address
1329 * Destination block address
1330 * Outputs: PASS=0 / FAIL=1
1331 * Description: used only for static wear leveling to move the block
1332 * containing static data to new blocks(more worn)
1333 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1334 int FTL_Copy_Block(u64 old_blk_addr
, u64 blk_addr
)
1336 int i
, r1
, r2
, wResult
= PASS
;
1338 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1339 __FILE__
, __LINE__
, __func__
);
1341 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
+= Cache
.pages_per_item
) {
1342 r1
= FTL_Cache_Read_All(g_pTempBuf
, old_blk_addr
+
1343 i
* DeviceInfo
.wPageDataSize
);
1344 r2
= FTL_Cache_Write_All(g_pTempBuf
, blk_addr
+
1345 i
* DeviceInfo
.wPageDataSize
);
1346 if ((ERR
== r1
) || (FAIL
== r2
)) {
1355 /* Search the block table to find out the least wear block and then return it */
1356 static u32
find_least_worn_blk_for_l2_cache(void)
1359 u32
*pbt
= (u32
*)g_pBlockTable
;
1360 u8 least_wear_cnt
= MAX_BYTE_VALUE
;
1361 u32 least_wear_blk_idx
= MAX_U32_VALUE
;
1364 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1365 if (IS_SPARE_BLOCK(i
)) {
1366 phy_idx
= (u32
)((~BAD_BLOCK
) & pbt
[i
]);
1367 if (phy_idx
> DeviceInfo
.wSpectraEndBlock
)
1368 printk(KERN_ERR
"find_least_worn_blk_for_l2_cache: "
1369 "Too big phy block num (%d)\n", phy_idx
);
1370 if (g_pWearCounter
[phy_idx
-DeviceInfo
.wSpectraStartBlock
] < least_wear_cnt
) {
1371 least_wear_cnt
= g_pWearCounter
[phy_idx
- DeviceInfo
.wSpectraStartBlock
];
1372 least_wear_blk_idx
= i
;
1377 nand_dbg_print(NAND_DBG_WARN
,
1378 "find_least_worn_blk_for_l2_cache: "
1379 "find block %d with least worn counter (%d)\n",
1380 least_wear_blk_idx
, least_wear_cnt
);
1382 return least_wear_blk_idx
;
1387 /* Get blocks for Level2 Cache */
1388 static int get_l2_cache_blks(void)
1392 u32
*pbt
= (u32
*)g_pBlockTable
;
1394 for (n
= 0; n
< BLK_NUM_FOR_L2_CACHE
; n
++) {
1395 blk
= find_least_worn_blk_for_l2_cache();
1396 if (blk
> DeviceInfo
.wDataBlockNum
) {
1397 nand_dbg_print(NAND_DBG_WARN
,
1398 "find_least_worn_blk_for_l2_cache: "
1399 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n
);
1402 /* Tag the free block as discard in block table */
1403 pbt
[blk
] = (pbt
[blk
] & (~BAD_BLOCK
)) | DISCARD_BLOCK
;
1404 /* Add the free block to the L2 Cache block array */
1405 cache_l2
.blk_array
[n
] = pbt
[blk
] & (~BAD_BLOCK
);
1411 static int erase_l2_cache_blocks(void)
1414 u32 pblk
, lblk
= BAD_BLOCK
;
1416 u32
*pbt
= (u32
*)g_pBlockTable
;
1418 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
1419 __FILE__
, __LINE__
, __func__
);
1421 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++) {
1422 pblk
= cache_l2
.blk_array
[i
];
1424 /* If the L2 cache block is invalid, then just skip it */
1425 if (MAX_U32_VALUE
== pblk
)
1428 BUG_ON(pblk
> DeviceInfo
.wSpectraEndBlock
);
1430 addr
= (u64
)pblk
<< DeviceInfo
.nBitsInBlockDataSize
;
1431 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
1432 /* Get logical block number of the erased block */
1433 lblk
= FTL_Get_Block_Index(pblk
);
1434 BUG_ON(BAD_BLOCK
== lblk
);
1435 /* Tag it as free in the block table */
1436 pbt
[lblk
] &= (u32
)(~DISCARD_BLOCK
);
1437 pbt
[lblk
] |= (u32
)(SPARE_BLOCK
);
1439 MARK_BLOCK_AS_BAD(pbt
[lblk
]);
1448 * Merge the valid data page in the L2 cache blocks into NAND.
1450 static int flush_l2_cache(void)
1452 struct list_head
*p
;
1453 struct spectra_l2_cache_list
*pnd
, *tmp_pnd
;
1454 u32
*pbt
= (u32
*)g_pBlockTable
;
1455 u32 phy_blk
, l2_blk
;
1460 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
1461 __FILE__
, __LINE__
, __func__
);
1463 if (list_empty(&cache_l2
.table
.list
)) /* No data to flush */
1466 //dump_cache_l2_table();
1468 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
1469 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1470 FTL_Write_IN_Progress_Block_Table_Page();
1473 list_for_each(p
, &cache_l2
.table
.list
) {
1474 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1475 if (IS_SPARE_BLOCK(pnd
->logical_blk_num
) ||
1476 IS_BAD_BLOCK(pnd
->logical_blk_num
) ||
1477 IS_DISCARDED_BLOCK(pnd
->logical_blk_num
)) {
1478 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d\n", __FILE__
, __LINE__
);
1479 memset(cache_l2_blk_buf
, 0xff, DeviceInfo
.wPagesPerBlock
* DeviceInfo
.wPageDataSize
);
1481 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d\n", __FILE__
, __LINE__
);
1482 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1483 ret
= GLOB_LLD_Read_Page_Main(cache_l2_blk_buf
,
1484 phy_blk
, 0, DeviceInfo
.wPagesPerBlock
);
1486 printk(KERN_ERR
"Read NAND page fail in %s, Line %d\n", __FILE__
, __LINE__
);
1490 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
++) {
1491 if (pnd
->pages_array
[i
] != MAX_U32_VALUE
) {
1492 l2_blk
= cache_l2
.blk_array
[(pnd
->pages_array
[i
] >> 16) & 0xffff];
1493 l2_page
= pnd
->pages_array
[i
] & 0xffff;
1494 ret
= GLOB_LLD_Read_Page_Main(cache_l2_page_buf
, l2_blk
, l2_page
, 1);
1496 printk(KERN_ERR
"Read NAND page fail in %s, Line %d\n", __FILE__
, __LINE__
);
1498 memcpy(cache_l2_blk_buf
+ i
* DeviceInfo
.wPageDataSize
, cache_l2_page_buf
, DeviceInfo
.wPageDataSize
);
1502 /* Find a free block and tag the original block as discarded */
1503 addr
= (u64
)pnd
->logical_blk_num
<< DeviceInfo
.nBitsInBlockDataSize
;
1504 ret
= FTL_Replace_Block(addr
);
1506 printk(KERN_ERR
"FTL_Replace_Block fail in %s, Line %d\n", __FILE__
, __LINE__
);
1509 /* Write back the updated data into NAND */
1510 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1511 if (FAIL
== GLOB_LLD_Write_Page_Main(cache_l2_blk_buf
, phy_blk
, 0, DeviceInfo
.wPagesPerBlock
)) {
1512 nand_dbg_print(NAND_DBG_WARN
,
1513 "Program NAND block %d fail in %s, Line %d\n",
1514 phy_blk
, __FILE__
, __LINE__
);
1515 /* This may not be really a bad block. So just tag it as discarded. */
1516 /* Then it has a chance to be erased when garbage collection. */
1517 /* If it is really bad, then the erase will fail and it will be marked */
1518 /* as bad then. Otherwise it will be marked as free and can be used again */
1519 MARK_BLK_AS_DISCARD(pbt
[pnd
->logical_blk_num
]);
1520 /* Find another free block and write it again */
1521 FTL_Replace_Block(addr
);
1522 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1523 if (FAIL
== GLOB_LLD_Write_Page_Main(cache_l2_blk_buf
, phy_blk
, 0, DeviceInfo
.wPagesPerBlock
)) {
1524 printk(KERN_ERR
"Failed to write back block %d when flush L2 cache."
1525 "Some data will be lost!\n", phy_blk
);
1526 MARK_BLOCK_AS_BAD(pbt
[pnd
->logical_blk_num
]);
1529 /* tag the new free block as used block */
1530 pbt
[pnd
->logical_blk_num
] &= (~SPARE_BLOCK
);
1534 /* Destroy the L2 Cache table and free the memory of all nodes */
1535 list_for_each_entry_safe(pnd
, tmp_pnd
, &cache_l2
.table
.list
, list
) {
1536 list_del(&pnd
->list
);
1540 /* Erase discard L2 cache blocks */
1541 if (erase_l2_cache_blocks() != PASS
)
1542 nand_dbg_print(NAND_DBG_WARN
,
1543 " Erase L2 cache blocks error in %s, Line %d\n",
1544 __FILE__
, __LINE__
);
1546 /* Init the Level2 Cache data structure */
1547 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++)
1548 cache_l2
.blk_array
[i
] = MAX_U32_VALUE
;
1549 cache_l2
.cur_blk_idx
= 0;
1550 cache_l2
.cur_page_num
= 0;
1551 INIT_LIST_HEAD(&cache_l2
.table
.list
);
1552 cache_l2
.table
.logical_blk_num
= MAX_U32_VALUE
;
1558 * Write back a changed victim cache item to the Level2 Cache
1559 * and update the L2 Cache table to map the change.
1560 * If the L2 Cache is full, then start to do the L2 Cache flush.
1562 static int write_back_to_l2_cache(u8
*buf
, u64 logical_addr
)
1564 u32 logical_blk_num
;
1565 u16 logical_page_num
;
1566 struct list_head
*p
;
1567 struct spectra_l2_cache_list
*pnd
, *pnd_new
;
1571 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
1572 __FILE__
, __LINE__
, __func__
);
1575 * If Level2 Cache table is empty, then it means either:
1576 * 1. This is the first time that the function called after FTL_init
1578 * 2. The Level2 Cache has just been flushed
1580 * So, 'steal' some free blocks from NAND for L2 Cache using
1581 * by just mask them as discard in the block table
1583 if (list_empty(&cache_l2
.table
.list
)) {
1584 BUG_ON(cache_l2
.cur_blk_idx
!= 0);
1585 BUG_ON(cache_l2
.cur_page_num
!= 0);
1586 BUG_ON(cache_l2
.table
.logical_blk_num
!= MAX_U32_VALUE
);
1587 if (FAIL
== get_l2_cache_blks()) {
1588 GLOB_FTL_Garbage_Collection();
1589 if (FAIL
== get_l2_cache_blks()) {
1590 printk(KERN_ALERT
"Fail to get L2 cache blks!\n");
1596 logical_blk_num
= BLK_FROM_ADDR(logical_addr
);
1597 logical_page_num
= PAGE_FROM_ADDR(logical_addr
, logical_blk_num
);
1598 BUG_ON(logical_blk_num
== MAX_U32_VALUE
);
1600 /* Write the cache item data into the current position of L2 Cache */
1606 if (FAIL
== GLOB_LLD_Write_Page_Main(buf
,
1607 cache_l2
.blk_array
[cache_l2
.cur_blk_idx
],
1608 cache_l2
.cur_page_num
, 1)) {
1609 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in "
1610 "%s, Line %d, new Bad Block %d generated!\n",
1612 cache_l2
.blk_array
[cache_l2
.cur_blk_idx
]);
1614 /* TODO: tag the current block as bad and try again */
1621 * Update the L2 Cache table.
1623 * First seaching in the table to see whether the logical block
1624 * has been mapped. If not, then kmalloc a new node for the
1625 * logical block, fill data, and then insert it to the list.
1626 * Otherwise, just update the mapped node directly.
1629 list_for_each(p
, &cache_l2
.table
.list
) {
1630 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1631 if (pnd
->logical_blk_num
== logical_blk_num
) {
1632 pnd
->pages_array
[logical_page_num
] =
1633 (cache_l2
.cur_blk_idx
<< 16) |
1634 cache_l2
.cur_page_num
;
1639 if (!found
) { /* Create new node for the logical block here */
1641 /* The logical pages to physical pages map array is
1642 * located at the end of struct spectra_l2_cache_list.
1644 node_size
= sizeof(struct spectra_l2_cache_list
) +
1645 sizeof(u32
) * DeviceInfo
.wPagesPerBlock
;
1646 pnd_new
= kmalloc(node_size
, GFP_ATOMIC
);
1648 printk(KERN_ERR
"Failed to kmalloc in %s Line %d\n",
1649 __FILE__
, __LINE__
);
1651 * TODO: Need to flush all the L2 cache into NAND ASAP
1652 * since no memory available here
1655 pnd_new
->logical_blk_num
= logical_blk_num
;
1656 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
++)
1657 pnd_new
->pages_array
[i
] = MAX_U32_VALUE
;
1658 pnd_new
->pages_array
[logical_page_num
] =
1659 (cache_l2
.cur_blk_idx
<< 16) | cache_l2
.cur_page_num
;
1660 list_add(&pnd_new
->list
, &cache_l2
.table
.list
);
1663 /* Increasing the current position pointer of the L2 Cache */
1664 cache_l2
.cur_page_num
++;
1665 if (cache_l2
.cur_page_num
>= DeviceInfo
.wPagesPerBlock
) {
1666 cache_l2
.cur_blk_idx
++;
1667 if (cache_l2
.cur_blk_idx
>= BLK_NUM_FOR_L2_CACHE
) {
1668 /* The L2 Cache is full. Need to flush it now */
1669 nand_dbg_print(NAND_DBG_WARN
,
1670 "L2 Cache is full, will start to flush it\n");
1673 cache_l2
.cur_page_num
= 0;
1681 * Search in the Level2 Cache table to find the cache item.
1682 * If find, read the data from the NAND page of L2 Cache,
1683 * Otherwise, return FAIL.
1685 static int search_l2_cache(u8
*buf
, u64 logical_addr
)
1687 u32 logical_blk_num
;
1688 u16 logical_page_num
;
1689 struct list_head
*p
;
1690 struct spectra_l2_cache_list
*pnd
;
1691 u32 tmp
= MAX_U32_VALUE
;
1696 logical_blk_num
= BLK_FROM_ADDR(logical_addr
);
1697 logical_page_num
= PAGE_FROM_ADDR(logical_addr
, logical_blk_num
);
1699 list_for_each(p
, &cache_l2
.table
.list
) {
1700 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1701 if (pnd
->logical_blk_num
== logical_blk_num
) {
1702 tmp
= pnd
->pages_array
[logical_page_num
];
1707 if (tmp
!= MAX_U32_VALUE
) { /* Found valid map */
1708 phy_blk
= cache_l2
.blk_array
[(tmp
>> 16) & 0xFFFF];
1709 phy_page
= tmp
& 0xFFFF;
1713 ret
= GLOB_LLD_Read_Page_Main(buf
, phy_blk
, phy_page
, 1);
1720 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1721 * Function: FTL_Cache_Write_Page
1722 * Inputs: Pointer to buffer, page address, cache block number
1723 * Outputs: PASS=0 / FAIL=1
1724 * Description: It writes the data in Cache Block
1725 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1726 static void FTL_Cache_Write_Page(u8
*pData
, u64 page_addr
,
1727 u8 cache_blk
, u16 flag
)
1732 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1733 __FILE__
, __LINE__
, __func__
);
1735 addr
= Cache
.array
[cache_blk
].address
;
1736 pDest
= Cache
.array
[cache_blk
].buf
;
1738 pDest
+= (unsigned long)(page_addr
- addr
);
1739 Cache
.array
[cache_blk
].changed
= SET
;
1741 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1742 int_cache
[ftl_cmd_cnt
].item
= cache_blk
;
1743 int_cache
[ftl_cmd_cnt
].cache
.address
=
1744 Cache
.array
[cache_blk
].address
;
1745 int_cache
[ftl_cmd_cnt
].cache
.changed
=
1746 Cache
.array
[cache_blk
].changed
;
1748 GLOB_LLD_MemCopy_CMD(pDest
, pData
, DeviceInfo
.wPageDataSize
, flag
);
1751 memcpy(pDest
, pData
, DeviceInfo
.wPageDataSize
);
1753 if (Cache
.array
[cache_blk
].use_cnt
< MAX_WORD_VALUE
)
1754 Cache
.array
[cache_blk
].use_cnt
++;
1757 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1758 * Function: FTL_Cache_Write
1760 * Outputs: PASS=0 / FAIL=1
1761 * Description: It writes least frequently used Cache block to flash if it
1763 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1764 static int FTL_Cache_Write(void)
1766 int i
, bResult
= PASS
;
1767 u16 bNO
, least_count
= 0xFFFF;
1769 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1770 __FILE__
, __LINE__
, __func__
);
1772 FTL_Calculate_LRU();
1775 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Cache_Write: "
1776 "Least used cache block is %d\n", bNO
);
1778 if (Cache
.array
[bNO
].changed
!= SET
)
1781 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Cache_Write: Cache"
1782 " Block %d containing logical block %d is dirty\n",
1784 (u32
)(Cache
.array
[bNO
].address
>>
1785 DeviceInfo
.nBitsInBlockDataSize
));
1787 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1788 int_cache
[ftl_cmd_cnt
].item
= bNO
;
1789 int_cache
[ftl_cmd_cnt
].cache
.address
=
1790 Cache
.array
[bNO
].address
;
1791 int_cache
[ftl_cmd_cnt
].cache
.changed
= CLEAR
;
1794 bResult
= write_back_to_l2_cache(Cache
.array
[bNO
].buf
,
1795 Cache
.array
[bNO
].address
);
1797 Cache
.array
[bNO
].changed
= CLEAR
;
1799 least_count
= Cache
.array
[bNO
].use_cnt
;
1801 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
1804 if (Cache
.array
[i
].use_cnt
> 0)
1805 Cache
.array
[i
].use_cnt
-= least_count
;
1811 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1812 * Function: FTL_Cache_Read
1813 * Inputs: Page address
1814 * Outputs: PASS=0 / FAIL=1
1815 * Description: It reads the block from device in Cache Block
1816 * Set the LRU count to 1
1817 * Mark the Cache Block as clean
1818 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1819 static int FTL_Cache_Read(u64 logical_addr
)
1821 u64 item_addr
, phy_addr
;
1825 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1826 __FILE__
, __LINE__
, __func__
);
1828 num
= Cache
.LRU
; /* The LRU cache item will be overwritten */
1830 item_addr
= (u64
)GLOB_u64_Div(logical_addr
, Cache
.cache_item_size
) *
1831 Cache
.cache_item_size
;
1832 Cache
.array
[num
].address
= item_addr
;
1833 Cache
.array
[num
].use_cnt
= 1;
1834 Cache
.array
[num
].changed
= CLEAR
;
1837 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1838 int_cache
[ftl_cmd_cnt
].item
= num
;
1839 int_cache
[ftl_cmd_cnt
].cache
.address
=
1840 Cache
.array
[num
].address
;
1841 int_cache
[ftl_cmd_cnt
].cache
.changed
=
1842 Cache
.array
[num
].changed
;
1846 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
1847 * Otherwise, read it from NAND
1849 ret
= search_l2_cache(Cache
.array
[num
].buf
, logical_addr
);
1850 if (PASS
== ret
) /* Hit in L2 Cache */
1853 /* Compute the physical start address of NAND device according to */
1854 /* the logical start address of the cache item (LRU cache item) */
1855 phy_addr
= FTL_Get_Physical_Block_Addr(item_addr
) +
1856 GLOB_u64_Remainder(item_addr
, 2);
1858 return FTL_Cache_Read_All(Cache
.array
[num
].buf
, phy_addr
);
1861 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1862 * Function: FTL_Check_Block_Table
1864 * Outputs: PASS=0 / FAIL=1
1865 * Description: It checks the correctness of each block table entry
1866 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1867 static int FTL_Check_Block_Table(int wOldTable
)
1872 u32
*pbt
= (u32
*)g_pBlockTable
;
1873 u8
*pFlag
= flag_check_blk_table
;
1875 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1876 __FILE__
, __LINE__
, __func__
);
1878 if (NULL
!= pFlag
) {
1879 memset(pFlag
, FAIL
, DeviceInfo
.wDataBlockNum
);
1880 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1881 blk_idx
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
1884 * 20081006/KBV - Changed to pFlag[i] reference
1885 * to avoid buffer overflow
1889 * 2008-10-20 Yunpeng Note: This change avoid
1890 * buffer overflow, but changed function of
1891 * the code, so it should be re-write later
1893 if ((blk_idx
> DeviceInfo
.wSpectraEndBlock
) ||
1907 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1908 * Function: FTL_Write_Block_Table
1910 * Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
1912 * Description: It writes the block table
1913 * Block table always mapped to LBA 0 which inturn mapped
1914 * to any physical block
1915 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1916 static int FTL_Write_Block_Table(int wForce
)
1918 u32
*pbt
= (u32
*)g_pBlockTable
;
1919 int wSuccess
= PASS
;
1920 u32 wTempBlockTableIndex
;
1921 u16 bt_pages
, new_bt_offset
;
1922 u8 blockchangeoccured
= 0;
1924 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1925 __FILE__
, __LINE__
, __func__
);
1927 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
1929 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
)
1932 if (PASS
== wForce
) {
1933 g_wBlockTableOffset
=
1934 (u16
)(DeviceInfo
.wPagesPerBlock
- bt_pages
);
1936 p_BTableChangesDelta
=
1937 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
1938 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
1940 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
1941 p_BTableChangesDelta
->g_wBlockTableOffset
=
1942 g_wBlockTableOffset
;
1943 p_BTableChangesDelta
->ValidFields
= 0x01;
1947 nand_dbg_print(NAND_DBG_DEBUG
,
1948 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
1949 g_wBlockTableIndex
, g_wBlockTableOffset
);
1952 new_bt_offset
= g_wBlockTableOffset
+ bt_pages
+ 1;
1953 if ((0 == (new_bt_offset
% DeviceInfo
.wPagesPerBlock
)) ||
1954 (new_bt_offset
> DeviceInfo
.wPagesPerBlock
) ||
1955 (FAIL
== wSuccess
)) {
1956 wTempBlockTableIndex
= FTL_Replace_Block_Table();
1957 if (BAD_BLOCK
== wTempBlockTableIndex
)
1959 if (!blockchangeoccured
) {
1960 bt_block_changed
= 1;
1961 blockchangeoccured
= 1;
1964 g_wBlockTableIndex
= wTempBlockTableIndex
;
1965 g_wBlockTableOffset
= 0;
1966 pbt
[BLOCK_TABLE_INDEX
] = g_wBlockTableIndex
;
1968 p_BTableChangesDelta
=
1969 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
1970 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
1972 p_BTableChangesDelta
->ftl_cmd_cnt
=
1974 p_BTableChangesDelta
->g_wBlockTableOffset
=
1975 g_wBlockTableOffset
;
1976 p_BTableChangesDelta
->g_wBlockTableIndex
=
1978 p_BTableChangesDelta
->ValidFields
= 0x03;
1980 p_BTableChangesDelta
=
1981 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
1983 sizeof(struct BTableChangesDelta
);
1985 p_BTableChangesDelta
->ftl_cmd_cnt
=
1987 p_BTableChangesDelta
->BT_Index
=
1989 p_BTableChangesDelta
->BT_Entry_Value
=
1990 pbt
[BLOCK_TABLE_INDEX
];
1991 p_BTableChangesDelta
->ValidFields
= 0x0C;
1995 wSuccess
= FTL_Write_Block_Table_Data();
1996 if (FAIL
== wSuccess
)
1997 MARK_BLOCK_AS_BAD(pbt
[BLOCK_TABLE_INDEX
]);
1998 } while (FAIL
== wSuccess
);
2000 g_cBlockTableStatus
= CURRENT_BLOCK_TABLE
;
2005 static int force_format_nand(void)
2009 /* Force erase the whole unprotected physical partiton of NAND */
2010 printk(KERN_ALERT
"Start to force erase whole NAND device ...\n");
2011 printk(KERN_ALERT
"From phyical block %d to %d\n",
2012 DeviceInfo
.wSpectraStartBlock
, DeviceInfo
.wSpectraEndBlock
);
2013 for (i
= DeviceInfo
.wSpectraStartBlock
; i
<= DeviceInfo
.wSpectraEndBlock
; i
++) {
2014 if (GLOB_LLD_Erase_Block(i
))
2015 printk(KERN_ERR
"Failed to force erase NAND block %d\n", i
);
2017 printk(KERN_ALERT
"Force Erase ends. Please reboot the system ...\n");
/* Public format entry point; currently force-erases the whole device. */
int GLOB_FTL_Flash_Format(void)
{
	/* return FTL_Format_Flash(1); */
	return force_format_nand();
}
2030 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2031 * Function: FTL_Search_Block_Table_IN_Block
2032 * Inputs: Block Number
2034 * Outputs: PASS / FAIL
2035 * Page containing the block table
2036 * Description: It searches the block table in the block
2037 * passed as an argument.
2039 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2040 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block
,
2041 u8 BT_Tag
, u16
*Page
)
2048 u8
*tempbuf
= tmp_buf_search_bt_in_block
;
2049 u8
*pSpareBuf
= spare_buf_search_bt_in_block
;
2050 u8
*pSpareBufBTLastPage
= spare_buf_bt_search_bt_in_block
;
2051 u8 bt_flag_last_page
= 0xFF;
2052 u8 search_in_previous_pages
= 0;
2055 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
2056 __FILE__
, __LINE__
, __func__
);
2058 nand_dbg_print(NAND_DBG_DEBUG
,
2059 "Searching block table in %u block\n",
2060 (unsigned int)BT_Block
);
2062 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
2064 for (i
= bt_pages
; i
< DeviceInfo
.wPagesPerBlock
;
2065 i
+= (bt_pages
+ 1)) {
2066 nand_dbg_print(NAND_DBG_DEBUG
,
2067 "Searching last IPF: %d\n", i
);
2068 Result
= GLOB_LLD_Read_Page_Main_Polling(tempbuf
,
2071 if (0 == memcmp(tempbuf
, g_pIPF
, DeviceInfo
.wPageDataSize
)) {
2072 if ((i
+ bt_pages
+ 1) < DeviceInfo
.wPagesPerBlock
) {
2075 search_in_previous_pages
= 1;
2080 if (!search_in_previous_pages
) {
2081 if (i
!= bt_pages
) {
2082 i
-= (bt_pages
+ 1);
2090 if (!search_in_previous_pages
) {
2092 nand_dbg_print(NAND_DBG_DEBUG
,
2093 "Reading the spare area of Block %u Page %u",
2094 (unsigned int)BT_Block
, i
);
2095 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
,
2097 nand_dbg_print(NAND_DBG_DEBUG
,
2098 "Reading the spare area of Block %u Page %u",
2099 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2100 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2101 BT_Block
, i
+ bt_pages
- 1, 1);
2104 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2106 for (; k
< j
; k
++) {
2107 if (tagarray
[k
] == BT_Tag
)
2113 bt_flag
= tagarray
[k
];
2117 if (Result
== PASS
) {
2119 j
= FTL_Extract_Block_Table_Tag(
2120 pSpareBufBTLastPage
, &tagarray
);
2122 for (; k
< j
; k
++) {
2123 if (tagarray
[k
] == BT_Tag
)
2129 bt_flag_last_page
= tagarray
[k
];
2133 if (Result
== PASS
) {
2134 if (bt_flag
== bt_flag_last_page
) {
2135 nand_dbg_print(NAND_DBG_DEBUG
,
2136 "Block table is found"
2137 " in page after IPF "
2143 g_cBlockTableStatus
=
2144 CURRENT_BLOCK_TABLE
;
2153 if (search_in_previous_pages
)
2156 i
= i
- (bt_pages
+ 1);
2160 nand_dbg_print(NAND_DBG_DEBUG
,
2161 "Reading the spare area of Block %d Page %d",
2164 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
, BT_Block
, i
, 1);
2165 nand_dbg_print(NAND_DBG_DEBUG
,
2166 "Reading the spare area of Block %u Page %u",
2167 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2169 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2170 BT_Block
, i
+ bt_pages
- 1, 1);
2173 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2175 for (; k
< j
; k
++) {
2176 if (tagarray
[k
] == BT_Tag
)
2182 bt_flag
= tagarray
[k
];
2186 if (Result
== PASS
) {
2188 j
= FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage
,
2191 for (; k
< j
; k
++) {
2192 if (tagarray
[k
] == BT_Tag
)
2198 bt_flag_last_page
= tagarray
[k
];
2204 if (Result
== PASS
) {
2205 if (bt_flag
== bt_flag_last_page
) {
2206 nand_dbg_print(NAND_DBG_DEBUG
,
2207 "Block table is found "
2208 "in page prior to IPF "
2209 "at block %u page %d\n",
2210 (unsigned int)BT_Block
, i
);
2213 g_cBlockTableStatus
=
2214 IN_PROGRESS_BLOCK_TABLE
;
2224 if (Result
== FAIL
) {
2225 if ((Last_IPF
> bt_pages
) && (i
< Last_IPF
) && (!BT_Found
)) {
2227 *Page
= i
- (bt_pages
+ 1);
2229 if ((Last_IPF
== bt_pages
) && (i
< Last_IPF
) && (!BT_Found
))
2233 if (Last_IPF
== 0) {
2236 nand_dbg_print(NAND_DBG_DEBUG
, "Reading the spare area of "
2237 "Block %u Page %u", (unsigned int)BT_Block
, i
);
2239 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
, BT_Block
, i
, 1);
2240 nand_dbg_print(NAND_DBG_DEBUG
,
2241 "Reading the spare area of Block %u Page %u",
2242 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2243 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2244 BT_Block
, i
+ bt_pages
- 1, 1);
2247 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2249 for (; k
< j
; k
++) {
2250 if (tagarray
[k
] == BT_Tag
)
2256 bt_flag
= tagarray
[k
];
2260 if (Result
== PASS
) {
2262 j
= FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage
,
2265 for (; k
< j
; k
++) {
2266 if (tagarray
[k
] == BT_Tag
)
2272 bt_flag_last_page
= tagarray
[k
];
2276 if (Result
== PASS
) {
2277 if (bt_flag
== bt_flag_last_page
) {
2278 nand_dbg_print(NAND_DBG_DEBUG
,
2279 "Block table is found "
2280 "in page after IPF at "
2281 "block %u page %u\n",
2282 (unsigned int)BT_Block
,
2286 g_cBlockTableStatus
=
2287 CURRENT_BLOCK_TABLE
;
2302 u8
*get_blk_table_start_addr(void)
2304 return g_pBlockTable
;
2307 unsigned long get_blk_table_len(void)
2309 return DeviceInfo
.wDataBlockNum
* sizeof(u32
);
2312 u8
*get_wear_leveling_table_start_addr(void)
2314 return g_pWearCounter
;
2317 unsigned long get_wear_leveling_table_len(void)
2319 return DeviceInfo
.wDataBlockNum
* sizeof(u8
);
2322 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2323 * Function: FTL_Read_Block_Table
2325 * Outputs: PASS / FAIL
2326 * Description: read the flash spare area and find a block containing the
2327 * most recent block table(having largest block_table_counter).
2328 * Find the last written Block table in this block.
2329 * Check the correctness of Block Table
2330 * If CDMA is enabled, this function is called in
2332 * We don't need to store changes in Block table in this
2333 * function as it is called only at initialization
2335 * Note: Currently this function is called at initialization
2336 * before any read/erase/write command issued to flash so,
2337 * there is no need to wait for CDMA list to complete as of now
2338 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2339 static int FTL_Read_Block_Table(void)
2343 u8
*tempBuf
, *tagarray
;
2346 u8 block_table_found
= 0;
2352 int wBytesCopied
= 0, tempvar
;
2354 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2355 __FILE__
, __LINE__
, __func__
);
2357 tempBuf
= tmp_buf1_read_blk_table
;
2358 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
2360 for (j
= DeviceInfo
.wSpectraStartBlock
;
2361 j
<= (int)DeviceInfo
.wSpectraEndBlock
;
2363 status
= GLOB_LLD_Read_Page_Spare(tempBuf
, j
, 0, 1);
2365 i
= FTL_Extract_Block_Table_Tag(tempBuf
, &tagarray
);
2367 status
= GLOB_LLD_Read_Page_Main_Polling(tempBuf
,
2369 for (; k
< i
; k
++) {
2370 if (tagarray
[k
] == tempBuf
[3])
2380 nand_dbg_print(NAND_DBG_DEBUG
,
2381 "Block table is contained in Block %d %d\n",
2382 (unsigned int)j
, (unsigned int)k
);
2384 if (g_pBTBlocks
[k
-FIRST_BT_ID
] == BTBLOCK_INVAL
) {
2385 g_pBTBlocks
[k
-FIRST_BT_ID
] = j
;
2386 block_table_found
= 1;
2388 printk(KERN_ERR
"FTL_Read_Block_Table -"
2389 "This should never happens. "
2390 "Two block table have same counter %u!\n", k
);
2394 if (block_table_found
) {
2395 if (g_pBTBlocks
[FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
&&
2396 g_pBTBlocks
[LAST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
) {
2398 while ((j
> FIRST_BT_ID
) &&
2399 (g_pBTBlocks
[j
- FIRST_BT_ID
] != BTBLOCK_INVAL
))
2401 if (j
== FIRST_BT_ID
) {
2403 last_erased
= LAST_BT_ID
;
2405 last_erased
= (u8
)j
+ 1;
2406 while ((j
> FIRST_BT_ID
) && (BTBLOCK_INVAL
==
2407 g_pBTBlocks
[j
- FIRST_BT_ID
]))
2412 while (g_pBTBlocks
[j
- FIRST_BT_ID
] == BTBLOCK_INVAL
)
2414 last_erased
= (u8
)j
;
2415 while ((j
< LAST_BT_ID
) && (BTBLOCK_INVAL
!=
2416 g_pBTBlocks
[j
- FIRST_BT_ID
]))
2418 if (g_pBTBlocks
[j
-FIRST_BT_ID
] == BTBLOCK_INVAL
)
2422 if (last_erased
> j
)
2423 j
+= (1 + LAST_BT_ID
- FIRST_BT_ID
);
2425 for (; (j
>= last_erased
) && (FAIL
== wResult
); j
--) {
2426 i
= (j
- FIRST_BT_ID
) %
2427 (1 + LAST_BT_ID
- FIRST_BT_ID
);
2429 FTL_Search_Block_Table_IN_Block(g_pBTBlocks
[i
],
2430 i
+ FIRST_BT_ID
, &Page
);
2431 if (g_cBlockTableStatus
== IN_PROGRESS_BLOCK_TABLE
)
2432 block_table_found
= 0;
2434 while ((search_result
== PASS
) && (FAIL
== wResult
)) {
2435 nand_dbg_print(NAND_DBG_DEBUG
,
2436 "FTL_Read_Block_Table:"
2437 "Block: %u Page: %u "
2438 "contains block table\n",
2439 (unsigned int)g_pBTBlocks
[i
],
2440 (unsigned int)Page
);
2442 tempBuf
= tmp_buf2_read_blk_table
;
2444 for (k
= 0; k
< bt_pages
; k
++) {
2445 Block
= g_pBTBlocks
[i
];
2449 GLOB_LLD_Read_Page_Main_Polling(
2450 tempBuf
, Block
, Page
, PageCount
);
2452 tempvar
= k
? 0 : 4;
2455 FTL_Copy_Block_Table_From_Flash(
2457 DeviceInfo
.wPageDataSize
- tempvar
,
2463 wResult
= FTL_Check_Block_Table(FAIL
);
2464 if (FAIL
== wResult
) {
2465 block_table_found
= 0;
2466 if (Page
> bt_pages
)
2467 Page
-= ((bt_pages
<<1) + 1);
2469 search_result
= FAIL
;
2475 if (PASS
== wResult
) {
2476 if (!block_table_found
)
2477 FTL_Execute_SPL_Recovery();
2479 if (g_cBlockTableStatus
== IN_PROGRESS_BLOCK_TABLE
)
2480 g_wBlockTableOffset
= (u16
)Page
+ 1;
2482 g_wBlockTableOffset
= (u16
)Page
- bt_pages
;
2484 g_wBlockTableIndex
= (u32
)g_pBTBlocks
[i
];
2487 if (DeviceInfo
.MLCDevice
)
2488 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
2489 DeviceInfo
.wDataBlockNum
* sizeof(u32
)
2490 + DeviceInfo
.wDataBlockNum
* sizeof(u8
)
2491 + DeviceInfo
.wDataBlockNum
* sizeof(u16
));
2493 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
2494 DeviceInfo
.wDataBlockNum
* sizeof(u32
)
2495 + DeviceInfo
.wDataBlockNum
* sizeof(u8
));
2499 if (FAIL
== wResult
)
2500 printk(KERN_ERR
"Yunpeng - "
2501 "Can not find valid spectra block table!\n");
2503 #if AUTO_FORMAT_FLASH
2504 if (FAIL
== wResult
) {
2505 nand_dbg_print(NAND_DBG_DEBUG
, "doing auto-format\n");
2506 wResult
= FTL_Format_Flash(0);
2513 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2514 * Function: FTL_Get_Page_Num
2515 * Inputs: Size in bytes
2516 * Outputs: Size in pages
2517 * Description: It calculates the pages required for the length passed
2518 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2519 static u32
FTL_Get_Page_Num(u64 length
)
2521 return (u32
)((length
>> DeviceInfo
.nBitsInPageDataSize
) +
2522 (GLOB_u64_Remainder(length
, 1) > 0 ? 1 : 0));
2525 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2526 * Function: FTL_Get_Physical_Block_Addr
2527 * Inputs: Block Address (byte format)
2528 * Outputs: Physical address of the block.
2529 * Description: It translates LBA to PBA by returning address stored
2530 * at the LBA location in the block table
2531 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2532 static u64
FTL_Get_Physical_Block_Addr(u64 logical_addr
)
2537 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2538 __FILE__
, __LINE__
, __func__
);
2540 pbt
= (u32
*)g_pBlockTable
;
2541 physical_addr
= (u64
) DeviceInfo
.wBlockDataSize
*
2542 (pbt
[BLK_FROM_ADDR(logical_addr
)] & (~BAD_BLOCK
));
2544 return physical_addr
;
2547 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2548 * Function: FTL_Get_Block_Index
2549 * Inputs: Physical Block no.
2550 * Outputs: Logical block no. /BAD_BLOCK
2551 * Description: It returns the logical block no. for the PBA passed
2552 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Get_Block_Index - reverse-map a physical block number to its
 * logical index by scanning the block table and masking the BAD_BLOCK
 * flag bits off each entry before comparing.  Per the banner comment
 * above: outputs the logical block no., or BAD_BLOCK if none matches.
 *
 * NOTE(review): this chunk lost interior lines during extraction (the
 * fused original numbering jumps from 2562 past the return statements),
 * so the returns are not visible here -- recover them from the pristine
 * file before changing this function.
 */
2553 static u32
FTL_Get_Block_Index(u32 wBlockNum
)
2555 u32
*pbt
= (u32
*)g_pBlockTable
;
2558 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2559 __FILE__
, __LINE__
, __func__
);
/* Linear scan over all data-block entries; masks flag bits only. */
2561 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++)
2562 if (wBlockNum
== (pbt
[i
] & (~BAD_BLOCK
)))
2568 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2569 * Function: GLOB_FTL_Wear_Leveling
2572 * Description: This is static wear leveling (done by explicit call)
2573 * do complete static wear leveling
2574 * do complete garbage collection
2575 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * GLOB_FTL_Wear_Leveling - public entry point for explicit (static)
 * wear leveling: runs FTL_Static_Wear_Leveling() and then a complete
 * GLOB_FTL_Garbage_Collection() pass, per the banner comment above.
 *
 * NOTE(review): the return statement is missing from this extraction
 * (fused numbering jumps past original line 2582) -- recover it from
 * the pristine file before editing.
 */
2576 int GLOB_FTL_Wear_Leveling(void)
2578 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
2579 __FILE__
, __LINE__
, __func__
);
2581 FTL_Static_Wear_Leveling();
2582 GLOB_FTL_Garbage_Collection();
/*
 * find_least_most_worn - scan the block table (skipping bad blocks and
 * blocks already flagged PASS in chg[]) and report, via the out
 * parameters, the least-worn DATA block and the most-worn SPARE block,
 * using the per-block wear counters in g_pWearCounter.
 *
 * NOTE(review): the extraction dropped the assignment lines inside both
 * inner if-bodies (fused numbering jumps 2604->2610 and 2611->2617), so
 * the updates of *most_idx/*most_cnt and *least_idx/*least_cnt are not
 * visible here.  Recover them from the pristine file before editing.
 */
2587 static void find_least_most_worn(u8
*chg
,
2588 u32
*least_idx
, u8
*least_cnt
,
2589 u32
*most_idx
, u8
*most_cnt
)
2591 u32
*pbt
= (u32
*)g_pBlockTable
;
/* Start after the block-table block itself. */
2596 for (i
= BLOCK_TABLE_INDEX
+ 1; i
< DeviceInfo
.wDataBlockNum
; i
++) {
2597 if (IS_BAD_BLOCK(i
) || PASS
== chg
[i
])
/* idx = physical block with flag bits masked off. */
2600 idx
= (u32
) ((~BAD_BLOCK
) & pbt
[i
]);
2601 cnt
= g_pWearCounter
[idx
- DeviceInfo
.wSpectraStartBlock
];
2603 if (IS_SPARE_BLOCK(i
)) {
2604 if (cnt
> *most_cnt
) {
2610 if (IS_DATA_BLOCK(i
)) {
2611 if (cnt
< *least_cnt
) {
/* Sanity: neither winner may be an already-changed block. */
2617 if (PASS
== chg
[*most_idx
] || PASS
== chg
[*least_idx
]) {
2618 debug_boundary_error(*most_idx
,
2619 DeviceInfo
.wDataBlockNum
, 0);
2620 debug_boundary_error(*least_idx
,
2621 DeviceInfo
.wDataBlockNum
, 0);
2627 static int move_blks_for_wear_leveling(u8
*chg
,
2628 u32
*least_idx
, u32
*rep_blk_num
, int *result
)
2630 u32
*pbt
= (u32
*)g_pBlockTable
;
2632 int j
, ret_cp_blk
, ret_erase
;
2635 chg
[*least_idx
] = PASS
;
2636 debug_boundary_error(*least_idx
, DeviceInfo
.wDataBlockNum
, 0);
2638 rep_blk
= FTL_Replace_MWBlock();
2639 if (rep_blk
!= BAD_BLOCK
) {
2640 nand_dbg_print(NAND_DBG_DEBUG
,
2641 "More than two spare blocks exist so do it\n");
2642 nand_dbg_print(NAND_DBG_DEBUG
, "Block Replaced is %d\n",
2645 chg
[rep_blk
] = PASS
;
2647 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
2648 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
2649 FTL_Write_IN_Progress_Block_Table_Page();
2652 for (j
= 0; j
< RETRY_TIMES
; j
++) {
2653 ret_cp_blk
= FTL_Copy_Block((u64
)(*least_idx
) *
2654 DeviceInfo
.wBlockDataSize
,
2655 (u64
)rep_blk
* DeviceInfo
.wBlockDataSize
);
2656 if (FAIL
== ret_cp_blk
) {
2657 ret_erase
= GLOB_FTL_Block_Erase((u64
)rep_blk
2658 * DeviceInfo
.wBlockDataSize
);
2659 if (FAIL
== ret_erase
)
2660 MARK_BLOCK_AS_BAD(pbt
[rep_blk
]);
2662 nand_dbg_print(NAND_DBG_DEBUG
,
2663 "FTL_Copy_Block == OK\n");
2668 if (j
< RETRY_TIMES
) {
2670 u32 old_idx
= FTL_Get_Block_Index(*least_idx
);
2671 u32 rep_idx
= FTL_Get_Block_Index(rep_blk
);
2672 tmp
= (u32
)(DISCARD_BLOCK
| pbt
[old_idx
]);
2673 pbt
[old_idx
] = (u32
)((~SPARE_BLOCK
) &
2677 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
2679 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2680 p_BTableChangesDelta
->ftl_cmd_cnt
=
2682 p_BTableChangesDelta
->BT_Index
= old_idx
;
2683 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[old_idx
];
2684 p_BTableChangesDelta
->ValidFields
= 0x0C;
2686 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
2688 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2690 p_BTableChangesDelta
->ftl_cmd_cnt
=
2692 p_BTableChangesDelta
->BT_Index
= rep_idx
;
2693 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[rep_idx
];
2694 p_BTableChangesDelta
->ValidFields
= 0x0C;
2697 pbt
[FTL_Get_Block_Index(rep_blk
)] |= BAD_BLOCK
;
2699 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
2701 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2703 p_BTableChangesDelta
->ftl_cmd_cnt
=
2705 p_BTableChangesDelta
->BT_Index
=
2706 FTL_Get_Block_Index(rep_blk
);
2707 p_BTableChangesDelta
->BT_Entry_Value
=
2708 pbt
[FTL_Get_Block_Index(rep_blk
)];
2709 p_BTableChangesDelta
->ValidFields
= 0x0C;
2715 if (((*rep_blk_num
)++) > WEAR_LEVELING_BLOCK_NUM
)
2718 printk(KERN_ERR
"Less than 3 spare blocks exist so quit\n");
2725 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2726 * Function: FTL_Static_Wear_Leveling
2728 * Outputs: PASS=0 / FAIL=1
2729 * Description: This is static wear leveling (done by explicit call)
2730 * search for most&least used
2731 * if difference < GATE:
2732 * update the block table with exhange
2733 * mark block table in flash as IN_PROGRESS
2735 * the caller should handle GC clean up after calling this function
2736 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Static_Wear_Leveling - explicit static wear leveling, per the
 * banner comment above.  Repeatedly finds the least-worn used block and
 * the most-worn free block (find_least_most_worn) and, while their wear
 * difference exceeds WEAR_LEVELING_GATE, swaps data toward the worn
 * spare via move_blks_for_wear_leveling().  chang_flag marks blocks
 * already processed this pass.
 *
 * NOTE(review): several declaration/initialization lines and the return
 * are missing from this extraction (fused numbering gaps at 2738-2744,
 * 2750-2753, and after 2779) -- recover them before editing.
 */
2737 int FTL_Static_Wear_Leveling(void)
2745 u32 replaced_blks
= 0;
2746 u8
*chang_flag
= flags_static_wear_leveling
;
2748 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
2749 __FILE__
, __LINE__
, __func__
);
/* Reset all per-block "already changed" flags for this pass. */
2754 memset(chang_flag
, FAIL
, DeviceInfo
.wDataBlockNum
);
2755 while (go_on
== PASS
) {
2756 nand_dbg_print(NAND_DBG_DEBUG
,
2757 "starting static wear leveling\n");
2759 least_worn_cnt
= 0xFF;
2760 least_worn_idx
= BLOCK_TABLE_INDEX
;
2761 most_worn_idx
= BLOCK_TABLE_INDEX
;
2763 find_least_most_worn(chang_flag
, &least_worn_idx
,
2764 &least_worn_cnt
, &most_worn_idx
, &most_worn_cnt
);
2766 nand_dbg_print(NAND_DBG_DEBUG
,
2767 "Used and least worn is block %u, whos count is %u\n",
2768 (unsigned int)least_worn_idx
,
2769 (unsigned int)least_worn_cnt
);
2771 nand_dbg_print(NAND_DBG_DEBUG
,
2772 "Free and most worn is block %u, whos count is %u\n",
2773 (unsigned int)most_worn_idx
,
2774 (unsigned int)most_worn_cnt
);
/* Only move data when the wear gap exceeds the gate threshold. */
2776 if ((most_worn_cnt
> least_worn_cnt
) &&
2777 (most_worn_cnt
- least_worn_cnt
> WEAR_LEVELING_GATE
))
2778 go_on
= move_blks_for_wear_leveling(chang_flag
,
2779 &least_worn_idx
, &replaced_blks
, &result
);
2788 static int do_garbage_collection(u32 discard_cnt
)
2790 u32
*pbt
= (u32
*)g_pBlockTable
;
2792 u8 bt_block_erased
= 0;
2793 int i
, cnt
, ret
= FAIL
;
2797 while ((i
< DeviceInfo
.wDataBlockNum
) && (discard_cnt
> 0) &&
2798 ((ftl_cmd_cnt
+ 28) < 256)) {
2799 if (((pbt
[i
] & BAD_BLOCK
) != BAD_BLOCK
) &&
2800 (pbt
[i
] & DISCARD_BLOCK
)) {
2801 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
2802 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
2803 FTL_Write_IN_Progress_Block_Table_Page();
2806 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
2807 DeviceInfo
.wBlockDataSize
);
2808 pba
= BLK_FROM_ADDR(addr
);
2810 for (cnt
= FIRST_BT_ID
; cnt
<= LAST_BT_ID
; cnt
++) {
2811 if (pba
== g_pBTBlocks
[cnt
- FIRST_BT_ID
]) {
2812 nand_dbg_print(NAND_DBG_DEBUG
,
2813 "GC will erase BT block %u\n",
2817 bt_block_erased
= 1;
2822 if (bt_block_erased
) {
2823 bt_block_erased
= 0;
2827 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
2828 DeviceInfo
.wBlockDataSize
);
2830 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
2831 pbt
[i
] &= (u32
)(~DISCARD_BLOCK
);
2832 pbt
[i
] |= (u32
)(SPARE_BLOCK
);
2833 p_BTableChangesDelta
=
2834 (struct BTableChangesDelta
*)
2837 sizeof(struct BTableChangesDelta
);
2838 p_BTableChangesDelta
->ftl_cmd_cnt
=
2840 p_BTableChangesDelta
->BT_Index
= i
;
2841 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[i
];
2842 p_BTableChangesDelta
->ValidFields
= 0x0C;
2846 MARK_BLOCK_AS_BAD(pbt
[i
]);
2857 static int do_garbage_collection(u32 discard_cnt
)
2859 u32
*pbt
= (u32
*)g_pBlockTable
;
2861 u8 bt_block_erased
= 0;
2862 int i
, cnt
, ret
= FAIL
;
2866 while ((i
< DeviceInfo
.wDataBlockNum
) && (discard_cnt
> 0)) {
2867 if (((pbt
[i
] & BAD_BLOCK
) != BAD_BLOCK
) &&
2868 (pbt
[i
] & DISCARD_BLOCK
)) {
2869 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
2870 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
2871 FTL_Write_IN_Progress_Block_Table_Page();
2874 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
2875 DeviceInfo
.wBlockDataSize
);
2876 pba
= BLK_FROM_ADDR(addr
);
2878 for (cnt
= FIRST_BT_ID
; cnt
<= LAST_BT_ID
; cnt
++) {
2879 if (pba
== g_pBTBlocks
[cnt
- FIRST_BT_ID
]) {
2880 nand_dbg_print(NAND_DBG_DEBUG
,
2881 "GC will erase BT block %d\n",
2885 bt_block_erased
= 1;
2890 if (bt_block_erased
) {
2891 bt_block_erased
= 0;
2895 /* If the discard block is L2 cache block, then just skip it */
2896 for (cnt
= 0; cnt
< BLK_NUM_FOR_L2_CACHE
; cnt
++) {
2897 if (cache_l2
.blk_array
[cnt
] == pba
) {
2898 nand_dbg_print(NAND_DBG_DEBUG
,
2899 "GC will erase L2 cache blk %d\n",
2904 if (cnt
< BLK_NUM_FOR_L2_CACHE
) { /* Skip it */
2910 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
2911 DeviceInfo
.wBlockDataSize
);
2913 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
2914 pbt
[i
] &= (u32
)(~DISCARD_BLOCK
);
2915 pbt
[i
] |= (u32
)(SPARE_BLOCK
);
2919 MARK_BLOCK_AS_BAD(pbt
[i
]);
2930 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2931 * Function: GLOB_FTL_Garbage_Collection
2933 * Outputs: PASS / FAIL (returns the number of un-erased blocks
2934 * Description: search the block table for all discarded blocks to erase
2935 * for each discarded block:
2936 * set the flash block to IN_PROGRESS
2938 * update the block table
2939 * write the block table to flash
2940 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * GLOB_FTL_Garbage_Collection - public GC entry point, per the banner
 * comment above: first reclaims discarded block-table blocks, then
 * counts discarded data blocks (IS_DISCARDED_BLOCK) and hands the count
 * to do_garbage_collection(), flushing the block table before and
 * after.
 *
 * NOTE(review): the re-entry guard condition for the KERN_ALERT printk,
 * the wDiscard increment, and the returns are missing from this
 * extraction (fused numbering gaps 2949->2952, 2962->2966, after 2978)
 * -- recover them before editing.
 */
2941 int GLOB_FTL_Garbage_Collection(void)
2946 u32
*pbt
= (u32
*)g_pBlockTable
;
2948 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
2949 __FILE__
, __LINE__
, __func__
);
2952 printk(KERN_ALERT
"GLOB_FTL_Garbage_Collection() "
2953 "has been re-entered! Exit.\n");
2959 GLOB_FTL_BT_Garbage_Collection();
2961 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
2962 if (IS_DISCARDED_BLOCK(i
))
2966 if (wDiscard
<= 0) {
2971 nand_dbg_print(NAND_DBG_DEBUG
,
2972 "Found %d discarded blocks\n", wDiscard
);
2974 FTL_Write_Block_Table(FAIL
);
2976 wResult
= do_garbage_collection(wDiscard
);
2978 FTL_Write_Block_Table(FAIL
);
2987 static int do_bt_garbage_collection(void)
2990 u32
*pbt
= (u32
*)g_pBlockTable
;
2991 u32
*pBTBlocksNode
= (u32
*)g_pBTBlocks
;
2995 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2996 __FILE__
, __LINE__
, __func__
);
3003 for (i
= last_erased
; (i
<= LAST_BT_ID
) &&
3004 (g_pBTBlocks
[((i
+ 2) % (1 + LAST_BT_ID
- FIRST_BT_ID
)) +
3005 FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
) &&
3006 ((ftl_cmd_cnt
+ 28)) < 256; i
++) {
3007 pba
= pBTBlocksNode
[i
- FIRST_BT_ID
];
3008 lba
= FTL_Get_Block_Index(pba
);
3009 nand_dbg_print(NAND_DBG_DEBUG
,
3010 "do_bt_garbage_collection: pba %d, lba %d\n",
3012 nand_dbg_print(NAND_DBG_DEBUG
,
3013 "Block Table Entry: %d", pbt
[lba
]);
3015 if (((pbt
[lba
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3016 (pbt
[lba
] & DISCARD_BLOCK
)) {
3017 nand_dbg_print(NAND_DBG_DEBUG
,
3018 "do_bt_garbage_collection_cdma: "
3019 "Erasing Block tables present in block %d\n",
3021 addr
= FTL_Get_Physical_Block_Addr((u64
)lba
*
3022 DeviceInfo
.wBlockDataSize
);
3023 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3024 pbt
[lba
] &= (u32
)(~DISCARD_BLOCK
);
3025 pbt
[lba
] |= (u32
)(SPARE_BLOCK
);
3027 p_BTableChangesDelta
=
3028 (struct BTableChangesDelta
*)
3031 sizeof(struct BTableChangesDelta
);
3033 p_BTableChangesDelta
->ftl_cmd_cnt
=
3035 p_BTableChangesDelta
->BT_Index
= lba
;
3036 p_BTableChangesDelta
->BT_Entry_Value
=
3039 p_BTableChangesDelta
->ValidFields
= 0x0C;
3042 pBTBlocksNode
[last_erased
- FIRST_BT_ID
] =
3044 nand_dbg_print(NAND_DBG_DEBUG
,
3045 "resetting bt entry at index %d "
3047 pBTBlocksNode
[i
- FIRST_BT_ID
]);
3048 if (last_erased
== LAST_BT_ID
)
3049 last_erased
= FIRST_BT_ID
;
3053 MARK_BLOCK_AS_BAD(pbt
[lba
]);
3064 static int do_bt_garbage_collection(void)
3067 u32
*pbt
= (u32
*)g_pBlockTable
;
3068 u32
*pBTBlocksNode
= (u32
*)g_pBTBlocks
;
3072 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3073 __FILE__
, __LINE__
, __func__
);
3080 for (i
= last_erased
; (i
<= LAST_BT_ID
) &&
3081 (g_pBTBlocks
[((i
+ 2) % (1 + LAST_BT_ID
- FIRST_BT_ID
)) +
3082 FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
); i
++) {
3083 pba
= pBTBlocksNode
[i
- FIRST_BT_ID
];
3084 lba
= FTL_Get_Block_Index(pba
);
3085 nand_dbg_print(NAND_DBG_DEBUG
,
3086 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3088 nand_dbg_print(NAND_DBG_DEBUG
,
3089 "Block Table Entry: %d", pbt
[lba
]);
3091 if (((pbt
[lba
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3092 (pbt
[lba
] & DISCARD_BLOCK
)) {
3093 nand_dbg_print(NAND_DBG_DEBUG
,
3094 "do_bt_garbage_collection: "
3095 "Erasing Block tables present in block %d\n",
3097 addr
= FTL_Get_Physical_Block_Addr((u64
)lba
*
3098 DeviceInfo
.wBlockDataSize
);
3099 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3100 pbt
[lba
] &= (u32
)(~DISCARD_BLOCK
);
3101 pbt
[lba
] |= (u32
)(SPARE_BLOCK
);
3103 pBTBlocksNode
[last_erased
- FIRST_BT_ID
] =
3105 nand_dbg_print(NAND_DBG_DEBUG
,
3106 "resetting bt entry at index %d "
3108 pBTBlocksNode
[i
- FIRST_BT_ID
]);
3109 if (last_erased
== LAST_BT_ID
)
3110 last_erased
= FIRST_BT_ID
;
3114 MARK_BLOCK_AS_BAD(pbt
[lba
]);
3126 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3127 * Function: GLOB_FTL_BT_Garbage_Collection
3129 * Outputs: PASS / FAIL (returns the number of un-erased blocks
3130 * Description: Erases discarded blocks containing Block table
3132 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Public entry point for reclaiming discarded block-table blocks.
 * Thin wrapper that forwards to do_bt_garbage_collection().
 */
int GLOB_FTL_BT_Garbage_Collection(void)
{
	return do_bt_garbage_collection();
}
3138 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3139 * Function: FTL_Replace_OneBlock
3140 * Inputs: Block number 1
3142 * Outputs: Replaced Block Number
3143 * Description: Interchange block table entries at wBlockNum and wReplaceNum
3145 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3146 static u32
FTL_Replace_OneBlock(u32 blk
, u32 rep_blk
)
3149 u32 replace_node
= BAD_BLOCK
;
3150 u32
*pbt
= (u32
*)g_pBlockTable
;
3152 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3153 __FILE__
, __LINE__
, __func__
);
3155 if (rep_blk
!= BAD_BLOCK
) {
3156 if (IS_BAD_BLOCK(blk
))
3159 tmp_blk
= DISCARD_BLOCK
| (~SPARE_BLOCK
& pbt
[blk
]);
3161 replace_node
= (u32
) ((~SPARE_BLOCK
) & pbt
[rep_blk
]);
3162 pbt
[blk
] = replace_node
;
3163 pbt
[rep_blk
] = tmp_blk
;
3166 p_BTableChangesDelta
=
3167 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3168 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3170 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3171 p_BTableChangesDelta
->BT_Index
= blk
;
3172 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[blk
];
3174 p_BTableChangesDelta
->ValidFields
= 0x0C;
3176 p_BTableChangesDelta
=
3177 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3178 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3180 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3181 p_BTableChangesDelta
->BT_Index
= rep_blk
;
3182 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[rep_blk
];
3183 p_BTableChangesDelta
->ValidFields
= 0x0C;
3187 return replace_node
;
3190 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3191 * Function: FTL_Write_Block_Table_Data
3192 * Inputs: Block table size in pages
3193 * Outputs: PASS=0 / FAIL=1
3194 * Description: Write block table data in flash
3195 * If first page and last page
3196 * Write data+BT flag
3199 * BT flag is a counter. Its value is incremented for block table
3200 * write in a new Block
3201 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3202 static int FTL_Write_Block_Table_Data(void)
3204 u64 dwBlockTableAddr
, pTempAddr
;
3206 u16 Page
, PageCount
;
3207 u8
*tempBuf
= tmp_buf_write_blk_table_data
;
3211 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3212 __FILE__
, __LINE__
, __func__
);
3215 (u64
)((u64
)g_wBlockTableIndex
* DeviceInfo
.wBlockDataSize
+
3216 (u64
)g_wBlockTableOffset
* DeviceInfo
.wPageDataSize
);
3217 pTempAddr
= dwBlockTableAddr
;
3219 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
3221 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Write_Block_Table_Data: "
3222 "page= %d BlockTableIndex= %d "
3223 "BlockTableOffset=%d\n", bt_pages
,
3224 g_wBlockTableIndex
, g_wBlockTableOffset
);
3226 Block
= BLK_FROM_ADDR(pTempAddr
);
3227 Page
= PAGE_FROM_ADDR(pTempAddr
, Block
);
3230 if (bt_block_changed
) {
3231 if (bt_flag
== LAST_BT_ID
) {
3232 bt_flag
= FIRST_BT_ID
;
3233 g_pBTBlocks
[bt_flag
- FIRST_BT_ID
] = Block
;
3234 } else if (bt_flag
< LAST_BT_ID
) {
3236 g_pBTBlocks
[bt_flag
- FIRST_BT_ID
] = Block
;
3239 if ((bt_flag
> (LAST_BT_ID
-4)) &&
3240 g_pBTBlocks
[FIRST_BT_ID
- FIRST_BT_ID
] !=
3242 bt_block_changed
= 0;
3243 GLOB_FTL_BT_Garbage_Collection();
3246 bt_block_changed
= 0;
3247 nand_dbg_print(NAND_DBG_DEBUG
,
3248 "Block Table Counter is %u Block %u\n",
3249 bt_flag
, (unsigned int)Block
);
3252 memset(tempBuf
, 0, 3);
3253 tempBuf
[3] = bt_flag
;
3254 wBytesCopied
= FTL_Copy_Block_Table_To_Flash(tempBuf
+ 4,
3255 DeviceInfo
.wPageDataSize
- 4, 0);
3256 memset(&tempBuf
[wBytesCopied
+ 4], 0xff,
3257 DeviceInfo
.wPageSize
- (wBytesCopied
+ 4));
3258 FTL_Insert_Block_Table_Signature(&tempBuf
[DeviceInfo
.wPageDataSize
],
3262 memcpy(g_pNextBlockTable
, tempBuf
,
3263 DeviceInfo
.wPageSize
* sizeof(u8
));
3264 nand_dbg_print(NAND_DBG_DEBUG
, "Writing First Page of Block Table "
3265 "Block %u Page %u\n", (unsigned int)Block
, Page
);
3266 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable
,
3268 LLD_CMD_FLAG_MODE_CDMA
| LLD_CMD_FLAG_ORDER_BEFORE_REST
)) {
3269 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in "
3270 "%s, Line %d, Function: %s, "
3271 "new Bad Block %d generated!\n",
3272 __FILE__
, __LINE__
, __func__
, Block
);
3277 g_pNextBlockTable
+= ((DeviceInfo
.wPageSize
* sizeof(u8
)));
3279 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare(tempBuf
, Block
, Page
, 1)) {
3280 nand_dbg_print(NAND_DBG_WARN
,
3281 "NAND Program fail in %s, Line %d, Function: %s, "
3282 "new Bad Block %d generated!\n",
3283 __FILE__
, __LINE__
, __func__
, Block
);
3289 PageCount
= bt_pages
- 1;
3290 if (PageCount
> 1) {
3291 wBytesCopied
+= FTL_Copy_Block_Table_To_Flash(tempBuf
,
3292 DeviceInfo
.wPageDataSize
* (PageCount
- 1),
3296 memcpy(g_pNextBlockTable
, tempBuf
,
3297 (PageCount
- 1) * DeviceInfo
.wPageDataSize
);
3298 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(
3299 g_pNextBlockTable
, Block
, Page
+ 1,
3301 nand_dbg_print(NAND_DBG_WARN
,
3302 "NAND Program fail in %s, Line %d, "
3304 "new Bad Block %d generated!\n",
3305 __FILE__
, __LINE__
, __func__
,
3311 g_pNextBlockTable
+= (PageCount
- 1) *
3312 DeviceInfo
.wPageDataSize
* sizeof(u8
);
3314 if (FAIL
== GLOB_LLD_Write_Page_Main(tempBuf
,
3315 Block
, Page
+ 1, PageCount
- 1)) {
3316 nand_dbg_print(NAND_DBG_WARN
,
3317 "NAND Program fail in %s, Line %d, "
3319 "new Bad Block %d generated!\n",
3320 __FILE__
, __LINE__
, __func__
,
3327 wBytesCopied
= FTL_Copy_Block_Table_To_Flash(tempBuf
,
3328 DeviceInfo
.wPageDataSize
, wBytesCopied
);
3329 memset(&tempBuf
[wBytesCopied
], 0xff,
3330 DeviceInfo
.wPageSize
-wBytesCopied
);
3331 FTL_Insert_Block_Table_Signature(
3332 &tempBuf
[DeviceInfo
.wPageDataSize
], bt_flag
);
3334 memcpy(g_pNextBlockTable
, tempBuf
,
3335 DeviceInfo
.wPageSize
* sizeof(u8
));
3336 nand_dbg_print(NAND_DBG_DEBUG
,
3337 "Writing the last Page of Block Table "
3338 "Block %u Page %u\n",
3339 (unsigned int)Block
, Page
+ bt_pages
- 1);
3340 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare_cdma(
3341 g_pNextBlockTable
, Block
, Page
+ bt_pages
- 1, 1,
3342 LLD_CMD_FLAG_MODE_CDMA
|
3343 LLD_CMD_FLAG_ORDER_BEFORE_REST
)) {
3344 nand_dbg_print(NAND_DBG_WARN
,
3345 "NAND Program fail in %s, Line %d, "
3346 "Function: %s, new Bad Block %d generated!\n",
3347 __FILE__
, __LINE__
, __func__
, Block
);
3352 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare(tempBuf
,
3353 Block
, Page
+bt_pages
- 1, 1)) {
3354 nand_dbg_print(NAND_DBG_WARN
,
3355 "NAND Program fail in %s, Line %d, "
3357 "new Bad Block %d generated!\n",
3358 __FILE__
, __LINE__
, __func__
, Block
);
3364 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Write_Block_Table_Data: done\n");
3370 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3371 * Function: FTL_Replace_Block_Table
3373 * Outputs: PASS=0 / FAIL=1
3374 * Description: Get a new block to write block table
3375 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Replace_Block_Table - pick a new block for the on-flash block
 * table, per the banner comment above.  Asks FTL_Replace_LWBlock() for
 * the least-worn spare; if that fails and garbage collection was
 * recommended (gc == PASS), runs a GC pass and retries once.  A second
 * BAD_BLOCK result is logged as an error.
 *
 * NOTE(review): the local declarations of blk/gc and the return of blk
 * are missing from this extraction (fused numbering gaps 3376->3381 and
 * after 3395) -- recover them before editing.
 */
3376 static u32
FTL_Replace_Block_Table(void)
3381 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3382 __FILE__
, __LINE__
, __func__
);
3384 blk
= FTL_Replace_LWBlock(BLOCK_TABLE_INDEX
, &gc
);
/* No spare found: try once more after a garbage-collection pass. */
3386 if ((BAD_BLOCK
== blk
) && (PASS
== gc
)) {
3387 GLOB_FTL_Garbage_Collection();
3388 blk
= FTL_Replace_LWBlock(BLOCK_TABLE_INDEX
, &gc
);
3390 if (BAD_BLOCK
== blk
)
3391 printk(KERN_ERR
"%s, %s: There is no spare block. "
3392 "It should never happen\n",
3393 __FILE__
, __func__
);
3395 nand_dbg_print(NAND_DBG_DEBUG
, "New Block table Block is %d\n", blk
);
3400 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3401 * Function: FTL_Replace_LWBlock
3402 * Inputs: Block number
3403 * Pointer to Garbage Collect flag
3405 * Description: Determine the least weared block by traversing
3407 * Set Garbage collection to be called if number of spare
3408 * block is less than Free Block Gate count
3409 * Change Block table entry to map least worn block for current
3411 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3412 static u32
FTL_Replace_LWBlock(u32 wBlockNum
, int *pGarbageCollect
)
3415 u32
*pbt
= (u32
*)g_pBlockTable
;
3416 u8 wLeastWornCounter
= 0xFF;
3417 u32 wLeastWornIndex
= BAD_BLOCK
;
3418 u32 wSpareBlockNum
= 0;
3419 u32 wDiscardBlockNum
= 0;
3421 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3422 __FILE__
, __LINE__
, __func__
);
3424 if (IS_SPARE_BLOCK(wBlockNum
)) {
3425 *pGarbageCollect
= FAIL
;
3426 pbt
[wBlockNum
] = (u32
)(pbt
[wBlockNum
] & (~SPARE_BLOCK
));
3428 p_BTableChangesDelta
=
3429 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3430 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3431 p_BTableChangesDelta
->ftl_cmd_cnt
=
3433 p_BTableChangesDelta
->BT_Index
= (u32
)(wBlockNum
);
3434 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[wBlockNum
];
3435 p_BTableChangesDelta
->ValidFields
= 0x0C;
3437 return pbt
[wBlockNum
];
3440 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3441 if (IS_DISCARDED_BLOCK(i
))
3444 if (IS_SPARE_BLOCK(i
)) {
3445 u32 wPhysicalIndex
= (u32
)((~BAD_BLOCK
) & pbt
[i
]);
3446 if (wPhysicalIndex
> DeviceInfo
.wSpectraEndBlock
)
3447 printk(KERN_ERR
"FTL_Replace_LWBlock: "
3448 "This should never occur!\n");
3449 if (g_pWearCounter
[wPhysicalIndex
-
3450 DeviceInfo
.wSpectraStartBlock
] <
3451 wLeastWornCounter
) {
3453 g_pWearCounter
[wPhysicalIndex
-
3454 DeviceInfo
.wSpectraStartBlock
];
3455 wLeastWornIndex
= i
;
3461 nand_dbg_print(NAND_DBG_WARN
,
3462 "FTL_Replace_LWBlock: Least Worn Counter %d\n",
3463 (int)wLeastWornCounter
);
3465 if ((wDiscardBlockNum
>= NUM_FREE_BLOCKS_GATE
) ||
3466 (wSpareBlockNum
<= NUM_FREE_BLOCKS_GATE
))
3467 *pGarbageCollect
= PASS
;
3469 *pGarbageCollect
= FAIL
;
3471 nand_dbg_print(NAND_DBG_DEBUG
,
3472 "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
3474 (unsigned int)wDiscardBlockNum
,
3475 (unsigned int)wSpareBlockNum
);
3477 return FTL_Replace_OneBlock(wBlockNum
, wLeastWornIndex
);
3480 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3481 * Function: FTL_Replace_MWBlock
3483 * Outputs: most worn spare block no./BAD_BLOCK
3484 * Description: It finds most worn spare block.
3485 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Replace_MWBlock - find the most-worn SPARE block, per the banner
 * comment above.  Scans the block table; for each spare entry, compares
 * its wear counter against the running maximum.  Returns the most-worn
 * spare's physical index, but only if more than two spare blocks exist
 * (wSpareBlockNum <= 2 bails out; the BAD_BLOCK return for that path is
 * not visible here).
 *
 * NOTE(review): the extraction dropped the lines that update
 * wMostWornCounter and count wSpareBlockNum (fused numbering gaps
 * 3501->3504 and 3506->3512) -- recover them before editing.
 */
3486 static u32
FTL_Replace_MWBlock(void)
3489 u32
*pbt
= (u32
*)g_pBlockTable
;
3490 u8 wMostWornCounter
= 0;
3491 u32 wMostWornIndex
= BAD_BLOCK
;
3492 u32 wSpareBlockNum
= 0;
3494 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3495 __FILE__
, __LINE__
, __func__
);
3497 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3498 if (IS_SPARE_BLOCK(i
)) {
/* Physical index with the SPARE flag bits masked off. */
3499 u32 wPhysicalIndex
= (u32
)((~SPARE_BLOCK
) & pbt
[i
]);
3500 if (g_pWearCounter
[wPhysicalIndex
-
3501 DeviceInfo
.wSpectraStartBlock
] >
3504 g_pWearCounter
[wPhysicalIndex
-
3505 DeviceInfo
.wSpectraStartBlock
];
3506 wMostWornIndex
= wPhysicalIndex
;
3512 if (wSpareBlockNum
<= 2)
3515 return wMostWornIndex
;
3518 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3519 * Function: FTL_Replace_Block
3520 * Inputs: Block Address
3521 * Outputs: PASS=0 / FAIL=1
3522 * Description: If block specified by blk_addr parameter is not free,
3523 * replace it with the least worn block.
3524 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * FTL_Replace_Block - ensure the block containing blk_addr is usable,
 * per the banner comment above.  If the block is currently a spare, it
 * is simply claimed (SPARE flag cleared) and the change is journaled in
 * the BTableChangesDelta log; otherwise it is swapped with the
 * least-worn block via FTL_Replace_LWBlock(), running garbage
 * collection when that helper requests it.
 *
 * NOTE(review): the extraction dropped lines here (fused numbering gaps
 * 3541->3543, 3545->3550, and after 3553): the value assigned to
 * ->ftl_cmd_cnt and the function's return are not visible.  Recover
 * them before editing.
 */
3525 static int FTL_Replace_Block(u64 blk_addr
)
3527 u32 current_blk
= BLK_FROM_ADDR(blk_addr
);
3528 u32
*pbt
= (u32
*)g_pBlockTable
;
3530 int GarbageCollect
= FAIL
;
3532 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3533 __FILE__
, __LINE__
, __func__
);
3535 if (IS_SPARE_BLOCK(current_blk
)) {
/* Claim the spare: clear its SPARE flag in the block table. */
3536 pbt
[current_blk
] = (~SPARE_BLOCK
) & pbt
[current_blk
];
/* Journal the table change into the delta log. */
3538 p_BTableChangesDelta
=
3539 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3540 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3541 p_BTableChangesDelta
->ftl_cmd_cnt
=
3543 p_BTableChangesDelta
->BT_Index
= current_blk
;
3544 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[current_blk
];
3545 p_BTableChangesDelta
->ValidFields
= 0x0C ;
3550 FTL_Replace_LWBlock(current_blk
, &GarbageCollect
);
3552 if (PASS
== GarbageCollect
)
3553 wResult
= GLOB_FTL_Garbage_Collection();
3558 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3559 * Function: GLOB_FTL_Is_BadBlock
3560 * Inputs: block number to test
3561 * Outputs: PASS (block is BAD) / FAIL (block is not bad)
3562 * Description: test if this block number is flagged as bad
3563 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3564 int GLOB_FTL_Is_BadBlock(u32 wBlockNum
)
3566 u32
*pbt
= (u32
*)g_pBlockTable
;
3568 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3569 __FILE__
, __LINE__
, __func__
);
3571 if (wBlockNum
>= DeviceInfo
.wSpectraStartBlock
3572 && BAD_BLOCK
== (pbt
[wBlockNum
] & BAD_BLOCK
))
3578 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3579 * Function: GLOB_FTL_Flush_Cache
3581 * Outputs: PASS=0 / FAIL=1
3582 * Description: flush all the cache blocks to flash
3583 * if a cache block is not dirty, don't do anything with it
3584 * else, write the block and update the block table
3585 * Note: This function should be called at shutdown/power down.
3586 * to write important data into device
3587 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3588 int GLOB_FTL_Flush_Cache(void)
3592 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
3593 __FILE__
, __LINE__
, __func__
);
3595 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
3596 if (SET
== Cache
.array
[i
].changed
) {
3598 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
3599 int_cache
[ftl_cmd_cnt
].item
= i
;
3600 int_cache
[ftl_cmd_cnt
].cache
.address
=
3601 Cache
.array
[i
].address
;
3602 int_cache
[ftl_cmd_cnt
].cache
.changed
= CLEAR
;
3605 ret
= write_back_to_l2_cache(Cache
.array
[i
].buf
, Cache
.array
[i
].address
);
3607 Cache
.array
[i
].changed
= CLEAR
;
3609 printk(KERN_ALERT
"Failed when write back to L2 cache!\n");
3610 /* TODO - How to handle this? */
3617 return FTL_Write_Block_Table(FAIL
);
3620 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3621 * Function: GLOB_FTL_Page_Read
3622 * Inputs: pointer to data
3623 * logical address of data (u64 is LBA * Bytes/Page)
3624 * Outputs: PASS=0 / FAIL=1
3625 * Description: reads a page of data into RAM from the cache
3626 * if the data is not already in cache, read from flash to cache
3627 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3628 int GLOB_FTL_Page_Read(u8
*data
, u64 logical_addr
)
3633 nand_dbg_print(NAND_DBG_DEBUG
, "GLOB_FTL_Page_Read - "
3634 "page_addr: %llu\n", logical_addr
);
3636 cache_item
= FTL_Cache_If_Hit(logical_addr
);
3638 if (UNHIT_CACHE_ITEM
== cache_item
) {
3639 nand_dbg_print(NAND_DBG_DEBUG
,
3640 "GLOB_FTL_Page_Read: Cache not hit\n");
3641 res
= FTL_Cache_Write();
3642 if (ERR
== FTL_Cache_Read(logical_addr
))
3644 cache_item
= Cache
.LRU
;
3647 FTL_Cache_Read_Page(data
, logical_addr
, cache_item
);
3652 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3653 * Function: GLOB_FTL_Page_Write
3654 * Inputs: pointer to data
3655 * address of data (ADDRESSTYPE is LBA * Bytes/Page)
3656 * Outputs: PASS=0 / FAIL=1
3657 * Description: writes a page of data from RAM to the cache
3658 * if the data is not already in cache, write back the
3659 * least recently used block and read the addressed block
3660 * from flash to cache
3661 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3662 int GLOB_FTL_Page_Write(u8
*pData
, u64 dwPageAddr
)
3665 u32
*pbt
= (u32
*)g_pBlockTable
;
3668 nand_dbg_print(NAND_DBG_TRACE
, "GLOB_FTL_Page_Write - "
3669 "dwPageAddr: %llu\n", dwPageAddr
);
3671 cache_blk
= FTL_Cache_If_Hit(dwPageAddr
);
3673 if (UNHIT_CACHE_ITEM
== cache_blk
) {
3674 wResult
= FTL_Cache_Write();
3675 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr
))) {
3676 wResult
= FTL_Replace_Block(dwPageAddr
);
3677 pbt
[BLK_FROM_ADDR(dwPageAddr
)] |= SPARE_BLOCK
;
3678 if (wResult
== FAIL
)
3681 if (ERR
== FTL_Cache_Read(dwPageAddr
))
3683 cache_blk
= Cache
.LRU
;
3684 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
, 0);
3687 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
,
3688 LLD_CMD_FLAG_ORDER_BEFORE_REST
);
3690 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
, 0);
3697 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3698 * Function: GLOB_FTL_Block_Erase
3699 * Inputs: address of block to erase (now in byte format, should change to
3701 * Outputs: PASS=0 / FAIL=1
3702 * Description: erases the specified block
3703 * increments the erase count
3704 * If erase count reaches its upper limit,call function to
3705 * do the ajustment as per the relative erase count values
3706 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3707 int GLOB_FTL_Block_Erase(u64 blk_addr
)
3712 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3713 __FILE__
, __LINE__
, __func__
);
3715 BlkIdx
= (u32
)(blk_addr
>> DeviceInfo
.nBitsInBlockDataSize
);
3717 if (BlkIdx
< DeviceInfo
.wSpectraStartBlock
) {
3718 printk(KERN_ERR
"GLOB_FTL_Block_Erase: "
3719 "This should never occur\n");
3724 status
= GLOB_LLD_Erase_Block_cdma(BlkIdx
, LLD_CMD_FLAG_MODE_CDMA
);
3726 nand_dbg_print(NAND_DBG_WARN
,
3727 "NAND Program fail in %s, Line %d, "
3728 "Function: %s, new Bad Block %d generated!\n",
3729 __FILE__
, __LINE__
, __func__
, BlkIdx
);
3731 status
= GLOB_LLD_Erase_Block(BlkIdx
);
3732 if (status
== FAIL
) {
3733 nand_dbg_print(NAND_DBG_WARN
,
3734 "NAND Program fail in %s, Line %d, "
3735 "Function: %s, new Bad Block %d generated!\n",
3736 __FILE__
, __LINE__
, __func__
, BlkIdx
);
3741 if (DeviceInfo
.MLCDevice
) {
3742 g_pReadCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
] = 0;
3743 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
3744 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
3745 FTL_Write_IN_Progress_Block_Table_Page();
3749 g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
]++;
3752 p_BTableChangesDelta
=
3753 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3754 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3755 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3756 p_BTableChangesDelta
->WC_Index
=
3757 BlkIdx
- DeviceInfo
.wSpectraStartBlock
;
3758 p_BTableChangesDelta
->WC_Entry_Value
=
3759 g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
];
3760 p_BTableChangesDelta
->ValidFields
= 0x30;
3762 if (DeviceInfo
.MLCDevice
) {
3763 p_BTableChangesDelta
=
3764 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3765 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3766 p_BTableChangesDelta
->ftl_cmd_cnt
=
3768 p_BTableChangesDelta
->RC_Index
=
3769 BlkIdx
- DeviceInfo
.wSpectraStartBlock
;
3770 p_BTableChangesDelta
->RC_Entry_Value
=
3771 g_pReadCounter
[BlkIdx
-
3772 DeviceInfo
.wSpectraStartBlock
];
3773 p_BTableChangesDelta
->ValidFields
= 0xC0;
3779 if (g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
] == 0xFE)
3780 FTL_Adjust_Relative_Erase_Count(BlkIdx
);
3786 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3787 * Function: FTL_Adjust_Relative_Erase_Count
3788 * Inputs: index to block that was just incremented and is at the max
3789 * Outputs: PASS=0 / FAIL=1
3790 * Description: If any erase counts at MAX, adjusts erase count of every
3791 * block by substracting least worn
3792 * counter from counter value of every entry in wear table
3793 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3794 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX
)
3796 u8 wLeastWornCounter
= MAX_BYTE_VALUE
;
3799 u32
*pbt
= (u32
*)g_pBlockTable
;
3802 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3803 __FILE__
, __LINE__
, __func__
);
3805 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3806 if (IS_BAD_BLOCK(i
))
3808 wWearIndex
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
3810 if ((wWearIndex
- DeviceInfo
.wSpectraStartBlock
) < 0)
3811 printk(KERN_ERR
"FTL_Adjust_Relative_Erase_Count:"
3812 "This should never occur\n");
3813 wWearCounter
= g_pWearCounter
[wWearIndex
-
3814 DeviceInfo
.wSpectraStartBlock
];
3815 if (wWearCounter
< wLeastWornCounter
)
3816 wLeastWornCounter
= wWearCounter
;
3819 if (wLeastWornCounter
== 0) {
3820 nand_dbg_print(NAND_DBG_WARN
,
3821 "Adjusting Wear Levelling Counters: Special Case\n");
3822 g_pWearCounter
[Index_of_MAX
-
3823 DeviceInfo
.wSpectraStartBlock
]--;
3825 p_BTableChangesDelta
=
3826 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3827 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3828 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3829 p_BTableChangesDelta
->WC_Index
=
3830 Index_of_MAX
- DeviceInfo
.wSpectraStartBlock
;
3831 p_BTableChangesDelta
->WC_Entry_Value
=
3832 g_pWearCounter
[Index_of_MAX
-
3833 DeviceInfo
.wSpectraStartBlock
];
3834 p_BTableChangesDelta
->ValidFields
= 0x30;
3836 FTL_Static_Wear_Leveling();
3838 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++)
3839 if (!IS_BAD_BLOCK(i
)) {
3840 wWearIndex
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
3841 g_pWearCounter
[wWearIndex
-
3842 DeviceInfo
.wSpectraStartBlock
] =
3845 DeviceInfo
.wSpectraStartBlock
] -
3848 p_BTableChangesDelta
=
3849 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3851 sizeof(struct BTableChangesDelta
);
3853 p_BTableChangesDelta
->ftl_cmd_cnt
=
3855 p_BTableChangesDelta
->WC_Index
= wWearIndex
-
3856 DeviceInfo
.wSpectraStartBlock
;
3857 p_BTableChangesDelta
->WC_Entry_Value
=
3858 g_pWearCounter
[wWearIndex
-
3859 DeviceInfo
.wSpectraStartBlock
];
3860 p_BTableChangesDelta
->ValidFields
= 0x30;
3868 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3869 * Function: FTL_Write_IN_Progress_Block_Table_Page
3872 * Description: It writes in-progress flag page to the page next to
3874 ***********************************************************************/
3875 static int FTL_Write_IN_Progress_Block_Table_Page(void)
3882 u32
*pbt
= (u32
*)g_pBlockTable
;
3883 u32 wTempBlockTableIndex
;
3886 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
3887 __FILE__
, __LINE__
, __func__
);
3889 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
3891 dwIPFPageAddr
= g_wBlockTableOffset
+ bt_pages
;
3893 nand_dbg_print(NAND_DBG_DEBUG
, "Writing IPF at "
3894 "Block %d Page %d\n",
3895 g_wBlockTableIndex
, dwIPFPageAddr
);
3898 wResult
= GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF
,
3899 g_wBlockTableIndex
, dwIPFPageAddr
, 1,
3900 LLD_CMD_FLAG_MODE_CDMA
| LLD_CMD_FLAG_ORDER_BEFORE_REST
);
3901 if (wResult
== FAIL
) {
3902 nand_dbg_print(NAND_DBG_WARN
,
3903 "NAND Program fail in %s, Line %d, "
3904 "Function: %s, new Bad Block %d generated!\n",
3905 __FILE__
, __LINE__
, __func__
,
3906 g_wBlockTableIndex
);
3908 g_wBlockTableOffset
= dwIPFPageAddr
+ 1;
3909 p_BTableChangesDelta
= (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3910 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3911 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3912 p_BTableChangesDelta
->g_wBlockTableOffset
= g_wBlockTableOffset
;
3913 p_BTableChangesDelta
->ValidFields
= 0x01;
3916 wResult
= GLOB_LLD_Write_Page_Main_Spare(g_pIPF
,
3917 g_wBlockTableIndex
, dwIPFPageAddr
, 1);
3918 if (wResult
== FAIL
) {
3919 nand_dbg_print(NAND_DBG_WARN
,
3920 "NAND Program fail in %s, Line %d, "
3921 "Function: %s, new Bad Block %d generated!\n",
3922 __FILE__
, __LINE__
, __func__
,
3923 (int)g_wBlockTableIndex
);
3924 MARK_BLOCK_AS_BAD(pbt
[BLOCK_TABLE_INDEX
]);
3925 wTempBlockTableIndex
= FTL_Replace_Block_Table();
3926 bt_block_changed
= 1;
3927 if (BAD_BLOCK
== wTempBlockTableIndex
)
3929 g_wBlockTableIndex
= wTempBlockTableIndex
;
3930 g_wBlockTableOffset
= 0;
3931 /* Block table tag is '00'. Means it's used one */
3932 pbt
[BLOCK_TABLE_INDEX
] = g_wBlockTableIndex
;
3935 g_wBlockTableOffset
= dwIPFPageAddr
+ 1;
3940 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3941 * Function: FTL_Read_Disturbance
3942 * Inputs: block address
3943 * Outputs: PASS=0 / FAIL=1
3944 * Description: used to handle read disturbance. Data in block that
3945 * reaches its read limit is moved to new block
3946 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3947 int FTL_Read_Disturbance(u32 blk_addr
)
3950 u32
*pbt
= (u32
*) g_pBlockTable
;
3951 u32 dwOldBlockAddr
= blk_addr
;
3954 u32 wLeastReadCounter
= 0xFFFF;
3955 u32 wLeastReadIndex
= BAD_BLOCK
;
3956 u32 wSpareBlockNum
= 0;
3961 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
3962 __FILE__
, __LINE__
, __func__
);
3965 g_pTempBuf
= cp_back_buf_copies
[cp_back_buf_idx
];
3967 if (cp_back_buf_idx
> COPY_BACK_BUF_NUM
) {
3968 printk(KERN_ERR
"cp_back_buf_copies overflow! Exit."
3969 "Maybe too many pending commands in your CDMA chain.\n");
3973 g_pTempBuf
= tmp_buf_read_disturbance
;
3976 wBlockNum
= FTL_Get_Block_Index(blk_addr
);
3979 /* This is a bug.Here 'i' should be logical block number
3980 * and start from 1 (0 is reserved for block table).
3981 * Have fixed it. - Yunpeng 2008. 12. 19
3983 for (i
= 1; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3984 if (IS_SPARE_BLOCK(i
)) {
3985 u32 wPhysicalIndex
=
3986 (u32
)((~SPARE_BLOCK
) & pbt
[i
]);
3987 if (g_pReadCounter
[wPhysicalIndex
-
3988 DeviceInfo
.wSpectraStartBlock
] <
3989 wLeastReadCounter
) {
3991 g_pReadCounter
[wPhysicalIndex
-
3992 DeviceInfo
.wSpectraStartBlock
];
3993 wLeastReadIndex
= i
;
3999 if (wSpareBlockNum
<= NUM_FREE_BLOCKS_GATE
) {
4000 wResult
= GLOB_FTL_Garbage_Collection();
4001 if (PASS
== wResult
)
4006 wTempNode
= (u32
)(DISCARD_BLOCK
| pbt
[wBlockNum
]);
4007 wReplacedNode
= (u32
)((~SPARE_BLOCK
) &
4008 pbt
[wLeastReadIndex
]);
4010 pbt
[wBlockNum
] = wReplacedNode
;
4011 pbt
[wLeastReadIndex
] = wTempNode
;
4012 p_BTableChangesDelta
=
4013 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4014 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4016 p_BTableChangesDelta
->ftl_cmd_cnt
=
4018 p_BTableChangesDelta
->BT_Index
= wBlockNum
;
4019 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[wBlockNum
];
4020 p_BTableChangesDelta
->ValidFields
= 0x0C;
4022 p_BTableChangesDelta
=
4023 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4024 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4026 p_BTableChangesDelta
->ftl_cmd_cnt
=
4028 p_BTableChangesDelta
->BT_Index
= wLeastReadIndex
;
4029 p_BTableChangesDelta
->BT_Entry_Value
=
4030 pbt
[wLeastReadIndex
];
4031 p_BTableChangesDelta
->ValidFields
= 0x0C;
4033 wResult
= GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf
,
4034 dwOldBlockAddr
, 0, DeviceInfo
.wPagesPerBlock
,
4035 LLD_CMD_FLAG_MODE_CDMA
);
4036 if (wResult
== FAIL
)
4041 if (wResult
!= FAIL
) {
4042 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(
4043 g_pTempBuf
, pbt
[wBlockNum
], 0,
4044 DeviceInfo
.wPagesPerBlock
)) {
4045 nand_dbg_print(NAND_DBG_WARN
,
4046 "NAND Program fail in "
4047 "%s, Line %d, Function: %s, "
4050 __FILE__
, __LINE__
, __func__
,
4051 (int)pbt
[wBlockNum
]);
4053 MARK_BLOCK_AS_BAD(pbt
[wBlockNum
]);
4058 wResult
= GLOB_LLD_Read_Page_Main(g_pTempBuf
,
4059 dwOldBlockAddr
, 0, DeviceInfo
.wPagesPerBlock
);
4060 if (wResult
== FAIL
)
4063 if (wResult
!= FAIL
) {
4064 /* This is a bug. At this time, pbt[wBlockNum]
4065 is still the physical address of
4066 discard block, and should not be write.
4067 Have fixed it as below.
4068 -- Yunpeng 2008.12.19
4070 wResult
= GLOB_LLD_Write_Page_Main(g_pTempBuf
,
4072 DeviceInfo
.wPagesPerBlock
);
4073 if (wResult
== FAIL
) {
4074 nand_dbg_print(NAND_DBG_WARN
,
4075 "NAND Program fail in "
4076 "%s, Line %d, Function: %s, "
4079 __FILE__
, __LINE__
, __func__
,
4080 (int)wReplacedNode
);
4081 MARK_BLOCK_AS_BAD(wReplacedNode
);
4083 pbt
[wBlockNum
] = wReplacedNode
;
4084 pbt
[wLeastReadIndex
] = wTempNode
;
4088 if ((wResult
== PASS
) && (g_cBlockTableStatus
!=
4089 IN_PROGRESS_BLOCK_TABLE
)) {
4090 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
4091 FTL_Write_IN_Progress_Block_Table_Page();
4095 } while (wResult
!= PASS
)