More detailed panic message for Nano2G VFL fast read problems
[kugel-rb.git] / firmware / target / arm / s5l8700 / ipodnano2g / ftl-nano2g.c
blob9bac47e5bbeca78428fa01267a4cb2c53b071082
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2009 by Michael Sparmann
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
24 #include <config.h>
25 #include <cpu.h>
26 #include <nand-target.h>
27 #include <ftl-target.h>
28 #include <string.h>
29 #include "kernel.h"
30 #include "panic.h"
34 #define FTL_COPYBUF_SIZE 1
35 #define FTL_WRITESPARE_SIZE 4
36 //#define FTL_FORCEMOUNT
40 #ifdef FTL_FORCEMOUNT
41 #ifndef FTL_READONLY
42 #define FTL_READONLY
43 #endif
44 #endif
#ifdef FTL_READONLY
/* Read-only build: writing is not supported, always fail */
uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
{
    (void)sector;
    (void)count;
    (void)buffer;
    return -1;
}

/* Read-only build: there is never anything to flush */
uint32_t ftl_sync(void)
{
    return 0;
}
#endif
/* Keeps the state of a scattered page block.
   This structure is used in memory only, not on flash,
   but it equals the one the OFW uses. */
struct ftl_log_type
{

    /* The ftl_cxt.nextblockusn at the time the block was allocated,
       needed in order to be able to remove the oldest ones first. */
    uint32_t usn;

    /* The vBlock number at which the scattered pages are stored */
    uint16_t scatteredvblock;

    /* the lBlock number for which those pages are */
    uint16_t logicalvblock;

    /* Pointer to ftl_offsets, contains the mapping which lPage is
       currently stored at which scattered vPage. */
    uint16_t* pageoffsets;

    /* Pages used in the vBlock, i.e. next page number to be written */
    uint16_t pagesused;

    /* Pages that are still up to date in this block, i.e. need to be
       moved when this vBlock is deallocated. */
    uint16_t pagescurrent;

    /* A flag whether all pages are still sequential in this block.
       Initialized to 1 on allocation, zeroed as soon as anything is
       written out of sequence, so that the block will need copying
       when committing to get the pages back into the right order.
       This is used to half the number of block erases needed when
       writing huge amounts of sequential data. */
    uint32_t issequential;

} __attribute__((packed));
/* Keeps the state of the FTL, both on flash and in memory */
struct ftl_cxt_type
{

    /* Update sequence number of the FTL context, decremented
       every time a new revision of FTL meta data is written. */
    uint32_t usn;

    /* Update sequence number for user data blocks. Incremented
       every time a portion of user pages is written, so that
       a consistency check can determine which copy of a user
       page is the most recent one. */
    uint32_t nextblockusn;

    /* Count of currently free pages in the block pool */
    uint16_t freecount;

    /* Index to the first free hyperblock in the blockpool ring buffer */
    uint16_t nextfreeidx;

    /* This is a counter that is used to better distribute block
       wear. It is incremented on every block erase, and if it
       gets too high (300 on writes, 20 on sync), the most and
       least worn hyperblock will be swapped (causing an additional
       block write) and the counter will be decreased by 20. */
    uint16_t swapcounter;

    /* Ring buffer of currently free hyperblocks. nextfreeidx is the
       index to freecount free ones, the other ones are currently
       allocated for scattered page hyperblocks. */
    uint16_t blockpool[0x14];

    /* Alignment to 32 bits */
    uint16_t field_36;

    /* vPages where the block map is stored */
    uint32_t ftl_map_pages[8];

    /* Probably additional map page number space for bigger chips */
    uint8_t field_58[0x28];

    /* vPages where the erase counters are stored */
    uint32_t ftl_erasectr_pages[8];

    /* Seems to be padding */
    uint8_t field_A0[0x70];

    /* Pointer to ftl_map used by Whimory, not used by us */
    uint32_t ftl_map_ptr;

    /* Pointer to ftl_erasectr used by Whimory, not used by us */
    uint32_t ftl_erasectr_ptr;

    /* Pointer to ftl_log used by Whimory, not used by us */
    uint32_t ftl_log_ptr;

    /* Flag used to indicate that some erase counter pages should be committed
       because they were changed more than 100 times since the last commit. */
    uint32_t erasedirty;

    /* Seems to be unused */
    uint16_t field_120;

    /* vBlocks used to store the FTL context, map, and erase
       counter pages. This is also a ring buffer, and the oldest
       page gets swapped with the least used page from the block
       pool ring buffer when a new one is allocated. */
    uint16_t ftlctrlblocks[3];

    /* The last used vPage number from ftlctrlblocks */
    uint32_t ftlctrlpage;

    /* Set on context sync, reset on write, so obviously never
       zero in the context written to the flash */
    uint32_t clean_flag;

    /* Seems to be unused, but gets loaded from flash by Whimory. */
    uint8_t field_130[0x15C];

} __attribute__((packed)) FTLCxtType;
/* Keeps the state of the bank's VFL, both on flash and in memory.
   There is one of these per bank. */
typedef struct ftl_vfl_cxt_type
{

    /* Cross-bank update sequence number, incremented on every VFL
       context commit on any bank. */
    uint32_t usn;

    /* See ftl_cxt.ftlctrlblocks. This is stored to the VFL contexts
       in order to be able to find the most recent FTL context copy
       when mounting the FTL. The VFL context number this will be
       written to on an FTL context commit is chosen semi-randomly. */
    uint16_t ftlctrlblocks[3];

    /* Alignment to 32 bits */
    uint8_t field_A[2];

    /* Decrementing update counter for VFL context commits per bank */
    uint32_t updatecount;

    /* Number of the currently active VFL context block, it's an index
       into vflcxtblocks. */
    uint16_t activecxtblock;

    /* Number of the first free page in the active VFL context block */
    uint16_t nextcxtpage;

    /* Seems to be unused */
    uint8_t field_14[4];

    /* Incremented every time a block erase error leads to a remap,
       but doesn't seem to be read anywhere. */
    uint16_t field_18;

    /* Number of spare blocks used */
    uint16_t spareused;

    /* pBlock number of the first spare block */
    uint16_t firstspare;

    /* Total number of spare blocks */
    uint16_t sparecount;

    /* Block remap table. Contains the vBlock number the n-th spare
       block is used as a replacement for. 0 = unused, 0xFFFF = bad. */
    uint16_t remaptable[0x334];

    /* Bad block table. Each bit represents 8 blocks. 1 = OK, 0 = Bad.
       If the entry is zero, you should look at the remap table to see
       if the block is remapped, and if yes, where the replacement is. */
    uint8_t bbt[0x11A];

    /* pBlock numbers used to store the VFL context. This is a ring
       buffer. On a VFL context write, always 8 pages are written,
       and it passes if at least 4 of them can be read back. */
    uint16_t vflcxtblocks[4];

    /* Blocks scheduled for remapping are stored at the end of the
       remap table. This is the first index used for them. */
    uint16_t scheduledstart;

    /* Probably padding */
    uint8_t field_7AC[0x4C];

    /* First checksum (addition) */
    uint32_t checksum1;

    /* Second checksum (XOR), there is a bug in whimory regarding this. */
    uint32_t checksum2;

} __attribute__((packed)) FTLVFLCxtType;
/* Layout of the spare bytes of each page on the flash */
union ftl_spare_data_type
{

    /* The layout used for actual user data (types 0x40 and 0x41) */
    struct ftl_spare_data_user_type
    {

        /* The lPage, i.e. Sector, number */
        uint32_t lpn;

        /* The update sequence number of that page,
           copied from ftl_cxt.nextblockusn on write */
        uint32_t usn;

        /* Seems to be unused */
        uint8_t field_8;

        /* Type field, 0x40 (data page) or 0x41
           (last data page of hyperblock) */
        uint8_t type;

        /* ECC mark, usually 0xFF. If an error occurred while reading the
           page during a copying operation earlier, this will be 0x55. */
        uint8_t eccmark;

        /* Seems to be unused */
        uint8_t field_B;

        /* ECC data for the user data */
        uint8_t dataecc[0x28];

        /* ECC data for the first 0xC bytes above */
        uint8_t spareecc[0xC];

    } __attribute__((packed)) user;

    /* The layout used for meta data (other types) */
    struct ftl_spare_data_meta_type
    {

        /* ftl_cxt.usn for FTL stuff, ftl_vfl_cxt.updatecount for VFL stuff */
        uint32_t usn;

        /* Index of the thing inside the page,
           for example number / index of the map or erase counter page */
        uint16_t idx;

        /* Seems to be unused */
        uint8_t field_6;

        /* Seems to be unused */
        uint8_t field_7;

        /* Seems to be unused */
        uint8_t field_8;

        /* Type field:
             0x43: FTL context page
             0x44: Block map page
             0x46: Erase counter page
             0x47: "FTL is currently mounted", i.e. unclean shutdown, mark
             0x80: VFL context page */
        uint8_t type;

        /* ECC mark, usually 0xFF. If an error occurred while reading the
           page during a copying operation earlier, this will be 0x55. */
        uint8_t eccmark;

        /* Seems to be unused */
        uint8_t field_B;

        /* ECC data for the user data */
        uint8_t dataecc[0x28];

        /* ECC data for the first 0xC bytes above */
        uint8_t spareecc[0xC];

    } __attribute__((packed)) meta;

} __attribute__((packed));
/* Keeps track of troublesome blocks, only in memory, lost on unmount. */
struct ftl_trouble_type
{

    /* vBlock number of the block giving trouble */
    uint16_t block;

    /* Bank of the block giving trouble */
    uint8_t bank;

    /* Error counter, incremented by 3 on error, decremented by 1 on erase,
       remaping will be done when it reaches 6. */
    uint8_t errors;

} __attribute__((packed));
358 /* Pointer to an info structure regarding the flash type used */
359 const struct nand_device_info_type* ftl_nand_type;
361 /* Number of banks we detected a chip on */
362 uint32_t ftl_banks;
364 /* Block map, used vor pBlock to vBlock mapping */
365 uint16_t ftl_map[0x2000];
367 /* VFL context for each bank */
368 struct ftl_vfl_cxt_type ftl_vfl_cxt[4];
370 /* FTL context */
371 struct ftl_cxt_type ftl_cxt;
373 /* Temporary data buffers for internal use by the FTL */
374 uint8_t ftl_buffer[0x800] __attribute__((aligned(16)));
376 /* Temporary spare byte buffer for internal use by the FTL */
377 union ftl_spare_data_type ftl_sparebuffer[FTL_WRITESPARE_SIZE] __attribute__((aligned(16)));
380 #ifndef FTL_READONLY
382 /* Lowlevel BBT for each bank */
383 uint8_t ftl_bbt[4][0x410];
385 /* Erase counters for the vBlocks */
386 uint16_t ftl_erasectr[0x2000];
388 /* Used by ftl_log */
389 uint16_t ftl_offsets[0x11][0x200];
391 /* Structs keeping record of scattered page blocks */
392 struct ftl_log_type ftl_log[0x11];
394 /* Global cross-bank update sequence number of the VFL context */
395 uint32_t ftl_vfl_usn;
397 /* Keeps track (temporarily) of troublesome blocks */
398 struct ftl_trouble_type ftl_troublelog[5];
400 /* Counts erase counter page changes, after 100 of them the affected
401 page will be committed to the flash. */
402 uint8_t ftl_erasectr_dirt[8];
404 /* Buffer needed for copying pages around while moving or committing blocks.
405 This can't be shared with ftl_buffer, because this one could be overwritten
406 during the copying operation in order to e.g. commit a CXT. */
407 uint8_t ftl_copybuffer[FTL_COPYBUF_SIZE][0x800] __attribute__((aligned(16)));
408 union ftl_spare_data_type ftl_copyspare[FTL_COPYBUF_SIZE] __attribute__((aligned(16)));
410 /* Needed to store the old scattered page offsets in order to be able to roll
411 back if something fails while compacting a scattered page block. */
412 uint16_t ftl_offsets_backup[0x200] __attribute__((aligned(16)));
414 #endif
417 static struct mutex ftl_mtx;
421 /* Finds a device info page for the specified bank and returns its number.
422 Used to check if one is present, and to read the lowlevel BBT. */
423 uint32_t ftl_find_devinfo(uint32_t bank)
425 /* Scan the last 10% of the flash for device info pages */
426 uint32_t lowestBlock = (*ftl_nand_type).blocks
427 - ((*ftl_nand_type).blocks / 10);
428 uint32_t block, page, pagenum;
429 for (block = (*ftl_nand_type).blocks - 1; block >= lowestBlock; block--)
431 page = (*ftl_nand_type).pagesperblock - 8;
432 for (; page < (*ftl_nand_type).pagesperblock; page++)
434 pagenum = block * (*ftl_nand_type).pagesperblock + page;
435 if ((nand_read_page(bank, pagenum, ftl_buffer,
436 &ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
437 continue;
438 if (memcmp(ftl_buffer, "DEVICEINFOSIGN\0", 0x10) == 0)
439 return pagenum;
442 return 0;
446 /* Checks if all banks have proper device info pages */
447 uint32_t ftl_has_devinfo(void)
449 uint32_t i;
450 for (i = 0; i < ftl_banks; i++) if (ftl_find_devinfo(i) == 0) return 0;
451 return 1;
455 /* Loads the lowlevel BBT for a bank to the specified buffer.
456 This is based on some cryptic disassembly and not fully understood yet. */
457 uint32_t ftl_load_bbt(uint32_t bank, uint8_t* bbt)
459 uint32_t i, j;
460 uint32_t pagebase, page = ftl_find_devinfo(bank), page2;
461 uint32_t unk1, unk2, unk3;
462 if (page == 0) return 1;
463 pagebase = page & ~((*ftl_nand_type).pagesperblock - 1);
464 if ((nand_read_page(bank, page, ftl_buffer,
465 (uint32_t*)0, 1, 0) & 0x11F) != 0) return 1;
466 if (memcmp(&ftl_buffer[0x18], "BBT", 4) != 0) return 1;
467 unk1 = ((uint16_t*)ftl_buffer)[0x10];
468 unk2 = ((uint16_t*)ftl_buffer)[0x11];
469 unk3 = ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 6 + 10]
470 + ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 6 + 11];
471 for (i = 0; i < unk1; i++)
473 for (j = 0; ; j++)
475 page2 = unk2 + i + unk3 * j;
476 if (page2 >= (uint32_t)((*ftl_nand_type).pagesperblock - 8))
477 break;
478 if ((nand_read_page(bank, pagebase + page2, ftl_buffer,
479 (void*)0, 1, 0) & 0x11F) == 0)
481 memcpy(bbt, ftl_buffer, 0x410);
482 return 0;
486 return 1;
490 /* Calculates the checksums for the VFL context page of the specified bank */
491 void ftl_vfl_calculate_checksum(uint32_t bank,
492 uint32_t* checksum1, uint32_t* checksum2)
494 uint32_t i;
495 *checksum1 = 0xAABBCCDD;
496 *checksum2 = 0xAABBCCDD;
497 for (i = 0; i < 0x1FE; i++)
499 *checksum1 += ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
500 *checksum2 ^= ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
505 /* Checks if the checksums of the VFL context
506 of the specified bank are correct */
507 uint32_t ftl_vfl_verify_checksum(uint32_t bank)
509 uint32_t checksum1, checksum2;
510 ftl_vfl_calculate_checksum(bank, &checksum1, &checksum2);
511 if (checksum1 == ftl_vfl_cxt[bank].checksum1) return 0;
512 /* The following line is pretty obviously a bug in Whimory,
513 but we do it the same way for compatibility. */
514 if (checksum2 != ftl_vfl_cxt[bank].checksum2) return 0;
515 panicf("FTL: Bad VFL CXT checksum!");
516 return 1;
520 #ifndef FTL_READONLY
521 /* Updates the checksums of the VFL context of the specified bank */
522 void ftl_vfl_update_checksum(uint32_t bank)
524 ftl_vfl_calculate_checksum(bank, &ftl_vfl_cxt[bank].checksum1,
525 &ftl_vfl_cxt[bank].checksum2);
527 #endif
530 #ifndef FTL_READONLY
531 /* Writes 8 copies of the VFL context of the specified bank to flash,
532 and succeeds if at least 4 can be read back properly. */
533 uint32_t ftl_vfl_store_cxt(uint32_t bank)
535 uint32_t i;
536 ftl_vfl_cxt[bank].updatecount--;
537 ftl_vfl_cxt[bank].usn = ++ftl_vfl_usn;
538 ftl_vfl_cxt[bank].nextcxtpage += 8;
539 ftl_vfl_update_checksum(bank);
540 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
541 ftl_sparebuffer[0].meta.usn = ftl_vfl_cxt[bank].updatecount;
542 ftl_sparebuffer[0].meta.field_8 = 0;
543 ftl_sparebuffer[0].meta.type = 0x80;
544 for (i = 1; i <= 8; i++)
546 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
547 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
548 uint32_t page = block * (*ftl_nand_type).pagesperblock;
549 page += ftl_vfl_cxt[bank].nextcxtpage - i;
550 nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer[0], 1);
552 uint32_t good = 0;
553 for (i = 1; i <= 8; i++)
555 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
556 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
557 uint32_t page = block * (*ftl_nand_type).pagesperblock;
558 page += ftl_vfl_cxt[bank].nextcxtpage - i;
559 if ((nand_read_page(bank, page, ftl_buffer,
560 &ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
561 continue;
562 if (memcmp(ftl_buffer, &ftl_vfl_cxt[bank], 0x7AC) != 0)
563 continue;
564 if (ftl_sparebuffer[0].meta.usn != ftl_vfl_cxt[bank].updatecount)
565 continue;
566 if (ftl_sparebuffer[0].meta.field_8 == 0
567 && ftl_sparebuffer[0].meta.type == 0x80) good++;
569 return good > 3 ? 0 : 1;
571 #endif
574 #ifndef FTL_READONLY
575 /* Commits the VFL context of the specified bank to flash,
576 retries until it works or all available pages have been tried */
577 uint32_t ftl_vfl_commit_cxt(uint32_t bank)
579 if (ftl_vfl_cxt[bank].nextcxtpage + 8 <= (*ftl_nand_type).pagesperblock)
580 if (ftl_vfl_store_cxt(bank) == 0) return 0;
581 uint32_t current = ftl_vfl_cxt[bank].activecxtblock;
582 uint32_t i = current, j;
583 while (1)
585 i = (i + 1) & 3;
586 if (i == current) break;
587 if (ftl_vfl_cxt[bank].vflcxtblocks[i] == 0xFFFF) continue;
588 for (j = 0; j < 4; j++)
589 if (nand_block_erase(bank, ftl_vfl_cxt[bank].vflcxtblocks[i]
590 * (*ftl_nand_type).pagesperblock) == 0)
591 break;
592 if (j == 4) continue;
593 ftl_vfl_cxt[bank].activecxtblock = i;
594 ftl_vfl_cxt[bank].nextcxtpage = 0;
595 if (ftl_vfl_store_cxt(bank) == 0) return 0;
597 panicf("FTL: Failed to commit VFL CXT!");
598 return 1;
600 #endif
603 /* Returns a pointer to the most recently updated VFL context,
604 used to find out the current FTL context vBlock numbers
605 (planetbeing's "maxthing") */
606 struct ftl_vfl_cxt_type* ftl_vfl_get_newest_cxt(void)
608 uint32_t i, maxusn;
609 struct ftl_vfl_cxt_type* cxt = (struct ftl_vfl_cxt_type*)0;
610 maxusn = 0;
611 for (i = 0; i < ftl_banks; i++)
612 if (ftl_vfl_cxt[i].usn >= maxusn)
614 cxt = &ftl_vfl_cxt[i];
615 maxusn = ftl_vfl_cxt[i].usn;
617 return cxt;
/* Checks if the specified pBlock is marked bad in the supplied lowlevel BBT.
   Only used while mounting the VFL.
   Returns 1 if the block is good, 0 if it is marked bad. */
uint32_t ftl_is_good_block(uint8_t* bbt, uint32_t block)
{
    /* One bit per block, LSB-first within each byte: 1 = OK, 0 = bad */
    if ((bbt[block >> 3] & (1 << (block & 7))) == 0) return 0;
    else return 1;
}
630 /* Checks if the specified vBlock could be remapped */
631 uint32_t ftl_vfl_is_good_block(uint32_t bank, uint32_t block)
633 uint8_t bbtentry = ftl_vfl_cxt[bank].bbt[block >> 6];
634 if ((bbtentry & (1 << ((7 - (block >> 3)) & 7))) == 0) return 0;
635 else return 1;
639 #ifndef FTL_READONLY
640 /* Sets or unsets the bad bit of the specified vBlock
641 in the specified bank's VFL context */
642 void ftl_vfl_set_good_block(uint32_t bank, uint32_t block, uint32_t isgood)
644 uint8_t bit = (1 << ((7 - (block >> 3)) & 7));
645 if (isgood == 1) ftl_vfl_cxt[bank].bbt[block >> 6] |= bit;
646 else ftl_vfl_cxt[bank].bbt[block >> 6] &= ~bit;
648 #endif
651 /* Tries to read a VFL context from the specified bank, pBlock and page */
652 uint32_t ftl_vfl_read_page(uint32_t bank, uint32_t block,
653 uint32_t startpage, void* databuffer,
654 union ftl_spare_data_type* sparebuffer)
656 uint32_t i;
657 for (i = 0; i < 8; i++)
659 uint32_t page = block * (*ftl_nand_type).pagesperblock
660 + startpage + i;
661 if ((nand_read_page(bank, page, databuffer,
662 sparebuffer, 1, 1) & 0x11F) == 0)
663 if ((*sparebuffer).meta.field_8 == 0
664 && (*sparebuffer).meta.type == 0x80)
665 return 0;
667 return 1;
671 /* Translates a bank and vBlock to a pBlock, following remaps */
672 uint32_t ftl_vfl_get_physical_block(uint32_t bank, uint32_t block)
674 if (ftl_vfl_is_good_block(bank, block) == 1) return block;
676 uint32_t spareindex;
677 uint32_t spareused = ftl_vfl_cxt[bank].spareused;
678 for (spareindex = 0; spareindex < spareused; spareindex++)
679 if (ftl_vfl_cxt[bank].remaptable[spareindex] == block)
680 return ftl_vfl_cxt[bank].firstspare + spareindex;
681 return block;
685 #ifndef FTL_READONLY
686 /* Checks if remapping is scheduled for the specified bank and vBlock */
687 uint32_t ftl_vfl_check_remap_scheduled(uint32_t bank, uint32_t block)
689 uint32_t i;
690 for (i = 0x333; i > 0 && i > ftl_vfl_cxt[bank].scheduledstart; i--)
691 if (ftl_vfl_cxt[bank].remaptable[i] == block) return 1;
692 return 0;
694 #endif
697 #ifndef FTL_READONLY
698 /* Schedules remapping for the specified bank and vBlock */
699 void ftl_vfl_schedule_block_for_remap(uint32_t bank, uint32_t block)
701 if (ftl_vfl_check_remap_scheduled(bank, block) == 1)
702 return;
703 panicf("FTL: Scheduling bank %u block %u for remap!", (unsigned)bank, (unsigned)block);
704 if (ftl_vfl_cxt[bank].scheduledstart == ftl_vfl_cxt[bank].spareused)
705 return;
706 ftl_vfl_cxt[bank].remaptable[--ftl_vfl_cxt[bank].scheduledstart] = block;
707 ftl_vfl_commit_cxt(bank);
709 #endif
712 #ifndef FTL_READONLY
713 /* Removes the specified bank and vBlock combination
714 from the remap scheduled list */
715 void ftl_vfl_mark_remap_done(uint32_t bank, uint32_t block)
717 uint32_t i;
718 uint32_t start = ftl_vfl_cxt[bank].scheduledstart;
719 uint32_t lastscheduled = ftl_vfl_cxt[bank].remaptable[start];
720 for (i = 0x333; i > 0 && i > start; i--)
721 if (ftl_vfl_cxt[bank].remaptable[i] == block)
723 if (i != start && i != 0x333)
724 ftl_vfl_cxt[bank].remaptable[i] = lastscheduled;
725 ftl_vfl_cxt[bank].scheduledstart++;
726 return;
729 #endif
732 #ifndef FTL_READONLY
733 /* Logs that there is trouble for the specified vBlock on the specified bank.
734 The vBlock will be scheduled for remap
735 if there is too much trouble with it. */
736 void ftl_vfl_log_trouble(uint32_t bank, uint32_t vblock)
738 uint32_t i;
739 for (i = 0; i < 5; i++)
740 if (ftl_troublelog[i].block == vblock
741 && ftl_troublelog[i].bank == bank)
743 ftl_troublelog[i].errors += 3;
744 if (ftl_troublelog[i].errors > 5)
746 ftl_vfl_schedule_block_for_remap(bank, vblock);
747 ftl_troublelog[i].block = 0xFFFF;
749 return;
751 for (i = 0; i < 5; i++)
752 if (ftl_troublelog[i].block == 0xFFFF)
754 ftl_troublelog[i].block = vblock;
755 ftl_troublelog[i].bank = bank;
756 ftl_troublelog[i].errors = 3;
757 return;
760 #endif
763 #ifndef FTL_READONLY
764 /* Logs a successful erase for the specified vBlock on the specified bank */
765 void ftl_vfl_log_success(uint32_t bank, uint32_t vblock)
767 uint32_t i;
768 for (i = 0; i < 5; i++)
769 if (ftl_troublelog[i].block == vblock
770 && ftl_troublelog[i].bank == bank)
772 if (--ftl_troublelog[i].errors == 0)
773 ftl_troublelog[i].block = 0xFFFF;
774 return;
777 #endif
780 #ifndef FTL_READONLY
781 /* Tries to remap the specified vBlock on the specified bank,
782 not caring about data in there.
783 If it worked, it will return the new pBlock number,
784 if not (no more spare blocks available), it will return zero. */
785 uint32_t ftl_vfl_remap_block(uint32_t bank, uint32_t block)
787 uint32_t i;
788 uint32_t newblock = 0, newidx;
789 panicf("FTL: Remapping bank %u block %u!", (unsigned)bank, (unsigned)block);
790 if (bank >= ftl_banks || block >= (*ftl_nand_type).blocks) return 0;
791 for (i = 0; i < ftl_vfl_cxt[bank].sparecount; i++)
792 if (ftl_vfl_cxt[bank].remaptable[i] == 0)
794 newblock = ftl_vfl_cxt[bank].firstspare + i;
795 newidx = i;
796 break;
798 if (newblock == 0) return 0;
799 for (i = 0; i < 9; i++)
800 if (nand_block_erase(bank,
801 newblock * (*ftl_nand_type).pagesperblock) == 0)
802 break;
803 for (i = 0; i < newidx; i++)
804 if (ftl_vfl_cxt[bank].remaptable[i] == block)
805 ftl_vfl_cxt[bank].remaptable[i] = 0xFFFF;
806 ftl_vfl_cxt[bank].remaptable[newidx] = block;
807 ftl_vfl_cxt[bank].spareused++;
808 ftl_vfl_set_good_block(bank, block, 0);
809 return newblock;
811 #endif
814 /* Reads the specified vPage, dealing with all kinds of trouble */
815 uint32_t ftl_vfl_read(uint32_t vpage, void* buffer, void* sparebuffer,
816 uint32_t checkempty, uint32_t remaponfail)
818 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
819 uint32_t syshyperblocks = (*ftl_nand_type).blocks
820 - (*ftl_nand_type).userblocks - 0x17;
821 uint32_t abspage = vpage + ppb * syshyperblocks;
822 if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
823 panicf("FTL: Trying to read out-of-bounds vPage %u", (unsigned)vpage);
824 //return 4;
826 uint32_t bank = abspage % ftl_banks;
827 uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
828 uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
829 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
830 uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
832 uint32_t ret = nand_read_page(bank, physpage, buffer,
833 sparebuffer, 1, checkempty);
835 if ((ret & 0x11D) != 0 && (ret & 2) == 0)
837 nand_reset(bank);
838 ret = nand_read_page(bank, physpage, buffer,
839 sparebuffer, 1, checkempty);
840 #ifdef FTL_READONLY
841 (void)remaponfail;
842 #else
843 if (remaponfail == 1 &&(ret & 0x11D) != 0 && (ret & 2) == 0)
844 ftl_vfl_schedule_block_for_remap(bank, block);
845 #endif
846 return ret;
849 return ret;
853 /* Multi-bank version of ftl_vfl_read, will read ftl_banks pages in parallel */
854 uint32_t ftl_vfl_read_fast(uint32_t vpage, void* buffer, void* sparebuffer,
855 uint32_t checkempty, uint32_t remaponfail)
857 uint32_t i, rc = 0;
858 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
859 uint32_t syshyperblocks = (*ftl_nand_type).blocks
860 - (*ftl_nand_type).userblocks - 0x17;
861 uint32_t abspage = vpage + ppb * syshyperblocks;
862 if (abspage + ftl_banks - 1 >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
863 panicf("FTL: Trying to read out-of-bounds vPage %u", (unsigned)vpage);
864 //return 4;
866 uint32_t bank = abspage % ftl_banks;
867 uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
868 uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
869 if (bank)
871 for (i = 0; i < ftl_banks; i++)
873 void* databuf = (void*)0;
874 void* sparebuf = (void*)0;
875 if (buffer) databuf = (void*)((uint32_t)buffer + 0x800 * i);
876 if (sparebuffer) sparebuf = (void*)((uint32_t)sparebuffer + 0x40 * i);
877 uint32_t ret = ftl_vfl_read(vpage + i, databuf, sparebuf, checkempty, remaponfail);
878 if (ret & 1) rc |= 1 << (i << 2);
879 if (ret & 2) rc |= 2 << (i << 2);
880 if (ret & 0x10) rc |= 4 << (i << 2);
881 if (ret & 0x100) rc |= 8 << (i << 2);
883 return rc;
885 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
886 uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
888 rc = nand_read_page_fast(physpage, buffer, sparebuffer, 1, checkempty);
889 if (!(rc & 0xdddd)) return rc;
891 for (i = 0; i < ftl_banks; i++)
893 if ((rc >> (i << 2)) & 0x2) continue;
894 if ((rc >> (i << 2)) & 0xf)
896 rc &= ~(0xf << (i << 2));
897 nand_reset(i);
898 uint32_t ret = nand_read_page(i, physpage,
899 (void*)((uint32_t)buffer + 0x800 * i),
900 (void*)((uint32_t)sparebuffer + 0x40 * i),
901 1, checkempty);
902 #ifdef FTL_READONLY
903 (void)remaponfail;
904 #else
905 if (remaponfail == 1 && (ret & 0x11D) != 0 && (ret & 2) == 0)
906 panicf("FTL: VFL fast read failed, RC %04X, bank %d, RET %03X, base %d",
907 rc, i, ret, vpage);
908 // ftl_vfl_schedule_block_for_remap(i, block);
909 #endif
910 if (ret & 1) rc |= 1 << (i << 2);
911 if (ret & 2) rc |= 2 << (i << 2);
912 if (ret & 0x10) rc |= 4 << (i << 2);
913 if (ret & 0x100) rc |= 8 << (i << 2);
917 return rc;
921 #ifndef FTL_READONLY
922 /* Writes the specified vPage, dealing with all kinds of trouble */
923 uint32_t ftl_vfl_write_single(uint32_t vpage, void* buffer, void* sparebuffer)
925 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
926 uint32_t syshyperblocks = (*ftl_nand_type).blocks
927 - (*ftl_nand_type).userblocks - 0x17;
928 uint32_t abspage = vpage + ppb * syshyperblocks;
929 if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
930 panicf("FTL: Trying to write out-of-bounds vPage %u",
931 (unsigned)vpage);
932 //return 4;
934 uint32_t bank = abspage % ftl_banks;
935 uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
936 uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
937 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
938 uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
940 if (nand_write_page(bank, physpage, buffer, sparebuffer, 1) == 0)
941 return 0;
943 if ((nand_read_page(bank, physpage, ftl_buffer,
944 &ftl_sparebuffer[0], 1, 1) & 0x11F) == 0)
945 return 0;
947 panicf("FTL: write error on vPage %u, bank %u, pPage %u",
948 (unsigned)vpage, (unsigned)bank, (unsigned)physpage);
949 ftl_vfl_log_trouble(bank, block);
950 return 1;
952 #endif
#ifndef FTL_READONLY
/* Writes count consecutive vPages, stopping at the first failure.
   Returns 0 on success, the failing page's error code otherwise. */
uint32_t ftl_vfl_write(uint32_t vpage, uint32_t count, void* buffer, void* sparebuffer)
{
    uint32_t i;
    for (i = 0; i < count; i++)
    {
        void* databuf = (void*)0;
        void* sparebuf = (void*)0;
        if (buffer) databuf = (void*)((uint32_t)buffer + 0x800 * i);
        if (sparebuffer) sparebuf = (void*)((uint32_t)sparebuffer + 0x40 * i);
        uint32_t rc = ftl_vfl_write_single(vpage + i, databuf, sparebuf);
        if (rc) return rc;
    }
    return 0;
}
#endif
974 /* Mounts the VFL on all banks */
975 uint32_t ftl_vfl_open(void)
977 uint32_t i, j, k;
978 uint32_t minusn, vflcxtidx, last;
979 FTLVFLCxtType* cxt;
980 uint16_t vflcxtblock[4];
981 #ifndef FTL_READONLY
982 ftl_vfl_usn = 0;
983 #else
984 /* Temporary BBT buffer if we're readonly,
985 as we won't need it again after mounting */
986 uint8_t bbt[0x410];
987 #endif
989 uint32_t syshyperblocks = (*ftl_nand_type).blocks
990 - (*ftl_nand_type).userblocks - 0x18;
992 for (i = 0; i < ftl_banks; i++)
993 #ifndef FTL_READONLY
994 if (ftl_load_bbt(i, ftl_bbt[i]) == 0)
995 #else
996 if (ftl_load_bbt(i, bbt) == 0)
997 #endif
999 for (j = 1; j <= syshyperblocks; j++)
1000 #ifndef FTL_READONLY
1001 if (ftl_is_good_block(ftl_bbt[i], j) != 0)
1002 #else
1003 if (ftl_is_good_block(bbt, j) != 0)
1004 #endif
1005 if (ftl_vfl_read_page(i, j, 0, ftl_buffer,
1006 &ftl_sparebuffer[0]) == 0)
1008 struct ftl_vfl_cxt_type* cxt;
1009 cxt = (struct ftl_vfl_cxt_type*)ftl_buffer;
1010 memcpy(vflcxtblock, &(*cxt).vflcxtblocks, 8);
1011 minusn = 0xFFFFFFFF;
1012 vflcxtidx = 4;
1013 for (k = 0; k < 4; k++)
1014 if (vflcxtblock[k] != 0xFFFF)
1015 if (ftl_vfl_read_page(i, vflcxtblock[k], 0,
1016 ftl_buffer,
1017 &ftl_sparebuffer[0]) == 0)
1018 if (ftl_sparebuffer[0].meta.usn > 0
1019 && ftl_sparebuffer[0].meta.usn <= minusn)
1021 minusn = ftl_sparebuffer[0].meta.usn;
1022 vflcxtidx = k;
1024 if (vflcxtidx == 4) //return 1;
1025 panicf("FTL: No VFL CXT block found on bank %u!",
1026 (unsigned)i);
1027 last = 0;
1028 uint32_t max = (*ftl_nand_type).pagesperblock;
1029 for (k = 8; k < max; k += 8)
1031 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
1032 k, ftl_buffer,
1033 &ftl_sparebuffer[0]) != 0)
1034 break;
1035 last = k;
1037 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
1038 last, ftl_buffer,
1039 &ftl_sparebuffer[0]) != 0)
1040 panicf("FTL: Re-reading VFL CXT block "
1041 "on bank %u failed!?", (unsigned)i);
1042 //return 1;
1043 memcpy(&ftl_vfl_cxt[i], ftl_buffer, 0x800);
1044 if (ftl_vfl_verify_checksum(i) != 0) return 1;
1045 #ifndef FTL_READONLY
1046 if (ftl_vfl_usn < ftl_vfl_cxt[i].usn)
1047 ftl_vfl_usn = ftl_vfl_cxt[i].usn;
1048 #endif
1049 break;
1052 else //return 1;
1053 panicf("FTL: Couldn't load bank %u lowlevel BBT!", (unsigned)i);
1054 cxt = ftl_vfl_get_newest_cxt();
1055 for (i = 0; i < ftl_banks; i++)
1056 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, (*cxt).ftlctrlblocks, 6);
1057 return 0;
/* Mounts the actual FTL: locates the newest FTL context block and page,
   loads the logical-to-physical block map and (on R/W builds) the erase
   counters, and resets the scattered page log.
   Returns 0 on success; most fatal conditions panic instead of returning. */
uint32_t ftl_open(void)
{
    uint32_t i;
    uint32_t ret;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    struct ftl_vfl_cxt_type* cxt = ftl_vfl_get_newest_cxt();

    /* Pick the FTL control block whose first page carries the lowest
       update sequence number among the three candidates. */
    uint32_t ftlcxtblock = 0xffffffff;
    uint32_t minusn = 0xffffffff;
    for (i = 0; i < 3; i++)
    {
        ret = ftl_vfl_read(ppb * (*cxt).ftlctrlblocks[i],
                           ftl_buffer, &ftl_sparebuffer[0], 1, 0);
        if ((ret &= 0x11F) != 0) continue;  /* unreadable: skip candidate */
        /* meta.type must be within 0x43..0x47 (FTL control page types) */
        if (ftl_sparebuffer[0].meta.type - 0x43 > 4) continue;
        if (ftlcxtblock != 0xffffffff && ftl_sparebuffer[0].meta.usn >= minusn)
            continue;
        minusn = ftl_sparebuffer[0].meta.usn;
        ftlcxtblock = (*cxt).ftlctrlblocks[i];
    }

    if (ftlcxtblock == 0xffffffff) //return 1;
        panicf("FTL: Couldn't find readable FTL CXT block!");

    /* Scan the chosen block backwards for the most recent context page
       (type 0x43). Any other readable page type found on the way means
       the FTL was not cleanly unmounted. */
    uint32_t ftlcxtfound = 0;
    for (i = (*ftl_nand_type).pagesperblock * ftl_banks - 1; i > 0; i--)
    {
        ret = ftl_vfl_read(ppb * ftlcxtblock + i,
                           ftl_buffer, &ftl_sparebuffer[0], 1, 0);
        if ((ret & 0x11F) != 0) continue;
        else if (ftl_sparebuffer[0].meta.type == 0x43)
        {
            memcpy(&ftl_cxt, ftl_buffer, 0x28C);
            ftlcxtfound = 1;
            break;
        }
        else
        {
            /* This will trip if there was an unclean unmount before. */
#ifndef FTL_FORCEMOUNT
            panicf("FTL: Unclean shutdown before!");
            break;
#endif
        }
    }

    if (ftlcxtfound == 0) //return 1;
        panicf("FTL: Couldn't find FTL CXT page!");

    /* Load the block map, 0x400 entries (2KB) per page.
       NOTE(review): the remainder test masks with 0x1FF while the page
       capacity implied by >> 10 is 0x400 entries -- & 0x3FF looks
       intended; confirm against the OFW before changing. */
    uint32_t pagestoread = (*ftl_nand_type).userblocks >> 10;
    if (((*ftl_nand_type).userblocks & 0x1FF) != 0) pagestoread++;

    for (i = 0; i < pagestoread; i++)
    {
        if ((ftl_vfl_read(ftl_cxt.ftl_map_pages[i],
                          ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
            panicf("FTL: Failed to read block map page %u", (unsigned)i);
            //return 1;

        /* Copy at most one 2KB page, clamped to the remaining map size */
        uint32_t toread = 2048;
        if (toread > ((*ftl_nand_type).userblocks << 1) - (i << 11))
            toread = ((*ftl_nand_type).userblocks << 1) - (i << 11);

        memcpy(&ftl_map[i << 10], ftl_buffer, toread);
    }

#ifndef FTL_READONLY
    /* Load the erase counters (userblocks + 23 entries) the same way */
    pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
    if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;

    for (i = 0; i < pagestoread; i++)
    {
        if ((ftl_vfl_read(ftl_cxt.ftl_erasectr_pages[i],
                          ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
            panicf("FTL: Failed to read erase counter page %u", (unsigned)i);
            //return 1;

        uint32_t toread = 2048;
        if (toread > (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11))
            toread = (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11);

        memcpy(&ftl_erasectr[i << 10], ftl_buffer, toread);
    }

    /* Mark all scattered page log entries as unused */
    for (i = 0; i < 0x11; i++)
    {
        ftl_log[i].scatteredvblock = 0xFFFF;
        ftl_log[i].logicalvblock = 0xFFFF;
        ftl_log[i].pageoffsets = ftl_offsets[i];
    }

    memset(ftl_troublelog, 0xFF, 20);
    memset(ftl_erasectr_dirt, 0, 8);
#endif

    return 0;
}
1161 #ifndef FTL_READONLY
1162 /* Returns a pointer to the ftl_log entry for the specified vBlock,
1163 or null, if there is none */
1164 struct ftl_log_type* ftl_get_log_entry(uint32_t block)
1166 uint32_t i;
1167 for (i = 0; i < 0x11; i++)
1169 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1170 if (ftl_log[i].logicalvblock == block) return &ftl_log[i];
1172 return (struct ftl_log_type*)0;
1174 #endif
/* Exposed function: Read highlevel sectors.
   sector/count are in 2KB (0x800 byte) logical pages; buffer receives
   count * 2KB. Returns 0 on success, 1 on a range or read error; pages
   that fail to read are zero-filled in the output buffer. */
uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer)
{
    uint32_t i, j;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t error = 0;

    if (sector + count > (*ftl_nand_type).userblocks * ppb)
        return 1;

    if (count == 0) return 0;

    mutex_lock(&ftl_mtx);

    for (i = 0; i < count; i++)
    {
        uint32_t block = (sector + i) / ppb;
        uint32_t page = (sector + i) % ppb;

        uint32_t abspage = ftl_map[block] * ppb + page;
#ifndef FTL_READONLY
        /* A scattered page block may hold a newer copy of this page */
        struct ftl_log_type* logentry = ftl_get_log_entry(block);
        if (logentry != (struct ftl_log_type*)0)
            if ((*logentry).scatteredvblock != 0xFFFF
             && (*logentry).pageoffsets[page] != 0xFFFF)
                abspage = (*logentry).scatteredvblock * ppb
                        + (*logentry).pageoffsets[page];
#endif

        /* Fast path: read one page per bank in parallel when the run is
           bank-aligned, long enough, and not overridden by a log entry */
#ifndef FTL_READONLY
        if (count >= i + ftl_banks && !(page & (ftl_banks - 1))
         && logentry == (struct ftl_log_type*)0)
#else
        if (count >= i + ftl_banks && !(page & (ftl_banks - 1)))
#endif
        {
            uint32_t ret = ftl_vfl_read_fast(abspage, &((uint8_t*)buffer)[i << 11],
                                             &ftl_sparebuffer[0], 1, 1);
            /* Per-bank status nibbles: bit 1 = empty page, bits 0/2/3 = error */
            for (j = 0; j < ftl_banks; j++)
                if (ret & (2 << (j << 2)))
                    memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
                else if ((ret & (0xd << (j << 2))) || ftl_sparebuffer[j].user.eccmark != 0xFF)
                {
                    error = 1;
                    memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
                }
            i += ftl_banks - 1;
        }
        else
        {
            uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
                                        &ftl_sparebuffer[0], 1, 1);
            if (ret & 2) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
            else if ((ret & 0x11D) != 0 || ftl_sparebuffer[0].user.eccmark != 0xFF)
            {
                error = 1;
                memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
            }
        }
    }

    mutex_unlock(&ftl_mtx);

    return error;
}
#ifndef FTL_READONLY
/* Performs a vBlock erase, dealing with hardware,
   remapping and all kinds of trouble.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_erase_block_internal(uint32_t block)
{
    uint32_t i, j;
    /* Translate the vBlock number into an absolute hyperblock number */
    block = block + (*ftl_nand_type).blocks
          - (*ftl_nand_type).userblocks - 0x17;
    if (block == 0 || block >= (*ftl_nand_type).blocks) return 1;
    for (i = 0; i < ftl_banks; i++)
    {
        /* Apply any pending remap before touching the block */
        if (ftl_vfl_check_remap_scheduled(i, block) == 1)
        {
            ftl_vfl_remap_block(i, block);
            ftl_vfl_mark_remap_done(i, block);
        }
        ftl_vfl_log_success(i, block);
        uint32_t pblock = ftl_vfl_get_physical_block(i, block);
        uint32_t rc;
        /* Up to three erase attempts before declaring the block bad */
        for (j = 0; j < 3; j++)
        {
            rc = nand_block_erase(i, pblock * (*ftl_nand_type).pagesperblock);
            if (rc == 0) break;
        }
        if (rc != 0)
        {
            panicf("FTL: Block erase failed on bank %u block %u",
                   (unsigned)i, (unsigned)block);
            /* If the block was already remapped, drop the spare entry */
            if (pblock != block)
            {
                uint32_t spareindex = pblock - ftl_vfl_cxt[i].firstspare;
                ftl_vfl_cxt[i].remaptable[spareindex] = 0xFFFF;
            }
            ftl_vfl_cxt[i].field_18++;
            if (ftl_vfl_remap_block(i, block) == 0) return 1;
            if (ftl_vfl_commit_cxt(i) != 0) return 1;
            memset(&ftl_sparebuffer, 0, 0x40);
            /* NOTE(review): writes ftl_vfl_cxt[0] rather than [i] when
               marking the failed physical block -- confirm whether this
               matches the OFW or is a copy/paste slip. */
            nand_write_page(i, pblock, &ftl_vfl_cxt[0], &ftl_sparebuffer, 1);
        }
    }
    return 0;
}
#endif
1288 #ifndef FTL_READONLY
1289 /* Highlevel vBlock erase, that increments the erase counter for the block */
1290 uint32_t ftl_erase_block(uint32_t block)
1292 ftl_erasectr[block]++;
1293 if (ftl_erasectr_dirt[block >> 10] == 100) ftl_cxt.erasedirty = 1;
1294 else ftl_erasectr_dirt[block >> 10]++;
1295 return ftl_erase_block_internal(block);
1297 #endif
#ifndef FTL_READONLY
/* Allocates a block from the pool,
   returning its vBlock number, or 0xFFFFFFFF on error.
   Chooses the free block with the lowest erase count and erases it
   before handing it out. */
uint32_t ftl_allocate_pool_block(void)
{
    uint32_t i;
    uint32_t erasectr = 0xFFFFFFFF, bestidx = 0xFFFFFFFF, block;
    /* Find the least-worn block in the circular free pool */
    for (i = 0; i < ftl_cxt.freecount; i++)
    {
        uint32_t idx = ftl_cxt.nextfreeidx + i;
        if (idx >= 0x14) idx -= 0x14;
        if (!ftl_cxt.blockpool[idx]) continue;  /* skip empty slots */
        if (ftl_erasectr[ftl_cxt.blockpool[idx]] < erasectr)
        {
            erasectr = ftl_erasectr[ftl_cxt.blockpool[idx]];
            bestidx = idx;
        }
    }
    if (bestidx == 0xFFFFFFFF) panicf("Out of pool blocks!");
    block = ftl_cxt.blockpool[bestidx];
    /* Swap the chosen block into the head slot, so the pool stays
       contiguous when the head index advances below */
    if (bestidx != ftl_cxt.nextfreeidx)
    {
        ftl_cxt.blockpool[bestidx] = ftl_cxt.blockpool[ftl_cxt.nextfreeidx];
        ftl_cxt.blockpool[ftl_cxt.nextfreeidx] = block;
    }
    if (block > (uint32_t)(*ftl_nand_type).userblocks + 0x17)
        panicf("FTL: Bad block number in pool: %u", (unsigned)block);
    if (ftl_erase_block(block) != 0) return 0xFFFFFFFF;
    if (++ftl_cxt.nextfreeidx == 0x14) ftl_cxt.nextfreeidx = 0;
    ftl_cxt.freecount--;
    return block;
}
#endif
1335 #ifndef FTL_READONLY
1336 /* Releases a vBlock back into the pool */
1337 void ftl_release_pool_block(uint32_t block)
1339 if (block >= (uint32_t)(*ftl_nand_type).userblocks + 0x17)
1340 panicf("FTL: Tried to release block %u", (unsigned)block);
1341 uint32_t idx = ftl_cxt.nextfreeidx + ftl_cxt.freecount++;
1342 if (idx >= 0x14) idx -= 0x14;
1343 ftl_cxt.blockpool[idx] = block;
1345 #endif
1348 #ifndef FTL_READONLY
1349 /* Commits the location of the FTL context blocks
1350 to a semi-randomly chosen VFL context */
1351 uint32_t ftl_store_ctrl_block_list(void)
1353 uint32_t i;
1354 for (i = 0; i < ftl_banks; i++)
1355 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, ftl_cxt.ftlctrlblocks, 6);
1356 return ftl_vfl_commit_cxt(ftl_vfl_usn % ftl_banks);
1358 #endif
#ifndef FTL_READONLY
/* Saves the n-th erase counter page to the flash,
   because it is too dirty or needs to be moved.
   Writes the page at ftl_cxt.ftlctrlpage, reads it back to verify both
   data and spare metadata, then updates the page directory.
   Returns 0 on success, 1 on any write or verification failure. */
uint32_t ftl_save_erasectr_page(uint32_t index)
{
    memset(&ftl_sparebuffer[0], 0xFF, 0x40);
    ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
    ftl_sparebuffer[0].meta.idx = index;
    ftl_sparebuffer[0].meta.type = 0x46;  /* erase counter page type */
    if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_erasectr[index << 10],
                      &ftl_sparebuffer[0]) != 0)
        return 1;
    /* Read back and verify what was just written */
    if ((ftl_vfl_read(ftl_cxt.ftlctrlpage, ftl_buffer,
                      &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
        return 1;
    if (memcmp(ftl_buffer, &ftl_erasectr[index << 10], 0x800) != 0) return 1;
    if (ftl_sparebuffer[0].meta.type != 0x46) return 1;
    if (ftl_sparebuffer[0].meta.idx != index) return 1;
    if (ftl_sparebuffer[0].meta.usn != ftl_cxt.usn) return 1;
    /* Success: remember where this counter page now lives */
    ftl_cxt.ftl_erasectr_pages[index] = ftl_cxt.ftlctrlpage;
    ftl_erasectr_dirt[index] = 0;
    return 0;
}
#endif
#ifndef FTL_READONLY
/* Increments ftl_cxt.ftlctrlpage to the next available FTL context page,
   allocating a new context block if neccessary.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_next_ctrl_pool_page(void)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    /* Still inside the current control block: nothing more to do */
    if (++ftl_cxt.ftlctrlpage % ppb != 0) return 0;
    /* Figure out which of the three control blocks was just filled;
       its successor (round robin) is the one that gets replaced */
    for (i = 0; i < 3; i++)
        if ((ftl_cxt.ftlctrlblocks[i] + 1) * ppb == ftl_cxt.ftlctrlpage)
            break;
    i = (i + 1) % 3;
    uint32_t oldblock = ftl_cxt.ftlctrlblocks[i];
    uint32_t newblock = ftl_allocate_pool_block();
    if (newblock == 0xFFFFFFFF) return 1;
    ftl_cxt.ftlctrlblocks[i] = newblock;
    ftl_cxt.ftlctrlpage = newblock * ppb;
    /* Erase counter pages still living in the retired block must be
       rewritten into the new one before the block is released */
    uint32_t pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
    if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;
    for (i = 0; i < pagestoread; i++)
        if (oldblock * ppb <= ftl_cxt.ftl_erasectr_pages[i]
         && (oldblock + 1) * ppb > ftl_cxt.ftl_erasectr_pages[i])
        {
            ftl_cxt.usn--;
            if (ftl_save_erasectr_page(i) != 0)
            {
                /* NOTE(review): at this point i indexes erase counter
                   pages, so ftlctrlblocks[i] restores the wrong slot;
                   also oldblock * (ppb + 1) - 1 looks like it was meant
                   to be (oldblock + 1) * ppb - 1. Both need confirming
                   against the OFW before changing. */
                ftl_cxt.ftlctrlblocks[i] = oldblock;
                ftl_cxt.ftlctrlpage = oldblock * (ppb + 1) - 1;
                ftl_release_pool_block(newblock);
                return 1;
            }
            ftl_cxt.ftlctrlpage++;
        }
    ftl_release_pool_block(oldblock);
    return ftl_store_ctrl_block_list();
}
#endif
#ifndef FTL_READONLY
/* Copies a vPage from one location to another.
   lpn is the logical page number stored in the spare data; type == 1
   requests the end-of-block marker (0x41) when the destination is the
   last page of its block, otherwise a plain data page (0x40) is written.
   Returns the result of the write (0 on success). */
uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
                       uint32_t lpn, uint32_t type)
{
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t rc = ftl_vfl_read(source, ftl_copybuffer[0],
                               &ftl_copyspare[0], 1, 1) & 0x11F;
    memset(&ftl_copyspare[0], 0xFF, 0x40);
    ftl_copyspare[0].user.lpn = lpn;
    ftl_copyspare[0].user.usn = ++ftl_cxt.nextblockusn;
    ftl_copyspare[0].user.type = 0x40;
    /* An empty source page propagates as zeroes; any other read error
       is tagged with an ECC mark so readers know the data is suspect */
    if ((rc & 2) != 0) memset(ftl_copybuffer[0], 0, 0x800);
    else if (rc != 0) ftl_copyspare[0].user.eccmark = 0x55;
    if (type == 1 && destination % ppb == ppb - 1)
        ftl_copyspare[0].user.type = 0x41;
    return ftl_vfl_write(destination, 1, ftl_copybuffer[0], &ftl_copyspare[0]);
}
#endif
#ifndef FTL_READONLY
/* Copies a pBlock to a vBlock, page by page.
   On any write failure the destination block is erased again and 1 is
   returned; returns 0 on success. */
uint32_t ftl_copy_block(uint32_t source, uint32_t destination)
{
    uint32_t i, j;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t error = 0;
    ftl_cxt.nextblockusn++;
    for (i = 0; i < ppb; i += FTL_COPYBUF_SIZE)
    {
        /* Read through ftl_read() so scattered pages are folded in */
        uint32_t rc = ftl_read(source * ppb + i,
                               FTL_COPYBUF_SIZE, ftl_copybuffer[0]);
        memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
        for (j = 0; j < FTL_COPYBUF_SIZE; j++)
        {
            ftl_copyspare[j].user.lpn = source * ppb + i + j;
            ftl_copyspare[j].user.usn = ftl_cxt.nextblockusn;
            ftl_copyspare[j].user.type = 0x40;
            if (rc)
            {
                /* The batch read failed: retry each page individually
                   and mark pages that still fail as suspect */
                if (ftl_read(source * ppb + i + j, 1, ftl_copybuffer[j]))
                    ftl_copyspare[j].user.eccmark = 0x55;
            }
            if (i + j == ppb - 1) ftl_copyspare[j].user.type = 0x41;
        }
        if (ftl_vfl_write(destination * ppb + i, FTL_COPYBUF_SIZE,
                          ftl_copybuffer[0], &ftl_copyspare[0]))
        {
            error = 1;
            break;
        }
    }
    if (error != 0)
    {
        /* Leave the destination erased so it can be reused */
        ftl_erase_block(destination);
        return 1;
    }
    return 0;
}
#endif
1489 #ifndef FTL_READONLY
1490 /* Clears ftl_log.issequential, if something violating that is written. */
1491 void ftl_check_still_sequential(struct ftl_log_type* entry, uint32_t page)
1493 if ((*entry).pagesused != (*entry).pagescurrent
1494 || (*entry).pageoffsets[page] != page)
1495 (*entry).issequential = 0;
1497 #endif
#ifndef FTL_READONLY
/* Copies all pages that are currently used from the scattered page block in
   use by the supplied ftl_log entry to a newly-allocated one, and releases
   the old one.
   In other words: It kicks the pages containing old garbage out of it to make
   space again. This is usually done when a scattered page block is being
   removed because it is full, but less than half of the pages in there are
   still in use and rest is just filled with old crap.
   Returns 0 on success, nonzero if all four attempts failed. */
uint32_t ftl_compact_scattered(struct ftl_log_type* entry)
{
    uint32_t i, j;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t error;
    struct ftl_log_type backup;
    /* No live pages at all: just drop the block */
    if ((*entry).pagescurrent == 0)
    {
        ftl_release_pool_block((*entry).scatteredvblock);
        (*entry).scatteredvblock = 0xFFFF;
        return 0;
    }
    /* Keep a snapshot so a failed attempt can be rolled back */
    backup = *entry;
    memcpy(ftl_offsets_backup, (*entry).pageoffsets, 0x400);
    for (i = 0; i < 4; i++)  /* up to four attempts */
    {
        uint32_t block = ftl_allocate_pool_block();
        if (block == 0xFFFFFFFF) return 1;
        (*entry).pagesused = 0;
        (*entry).pagescurrent = 0;
        (*entry).issequential = 1;
        (*entry).scatteredvblock = block;
        error = 0;
        /* Copy every live page into the fresh block, compacting the
           offset table as we go */
        for (j = 0; j < ppb; j++)
            if ((*entry).pageoffsets[j] != 0xFFFF)
            {
                uint32_t lpn = (*entry).logicalvblock * ppb + j;
                uint32_t newpage = block * ppb + (*entry).pagesused;
                uint32_t oldpage = backup.scatteredvblock * ppb
                                 + (*entry).pageoffsets[j];
                if (ftl_copy_page(oldpage, newpage, lpn,
                                  (*entry).issequential) != 0)
                {
                    error = 1;
                    break;
                }
                (*entry).pageoffsets[j] = (*entry).pagesused++;
                (*entry).pagescurrent++;
                ftl_check_still_sequential(entry, j);
            }
        if (backup.pagescurrent != (*entry).pagescurrent) error = 1;
        if (error == 0)
        {
            /* Success: the old scattered block can finally go */
            ftl_release_pool_block(backup.scatteredvblock);
            break;
        }
        /* Roll the entry back and retry with another pool block */
        *entry = backup;
        memcpy((*entry).pageoffsets, ftl_offsets_backup, 0x400);
    }
    return error;
}
#endif
#ifndef FTL_READONLY
/* Commits an ftl_log entry to proper blocks, no matter what's in there.
   Merges the logical block and its scattered pages into a freshly
   allocated block (up to four attempts), then swaps the new block into
   the map and releases the old ones. Returns 0 on success, 1 on failure. */
uint32_t ftl_commit_scattered(struct ftl_log_type* entry)
{
    uint32_t i;
    uint32_t error;
    uint32_t block;
    for (i = 0; i < 4; i++)
    {
        block = ftl_allocate_pool_block();
        if (block == 0xFFFFFFFF) return 1;
        /* ftl_copy_block() reads via ftl_read(), which automatically
           prefers the newer scattered copies of each page */
        error = ftl_copy_block((*entry).logicalvblock, block);
        if (error == 0) break;
        ftl_release_pool_block(block);
    }
    if (error != 0) return 1;
    /* Retire both the scattered block and the old data block */
    ftl_release_pool_block((*entry).scatteredvblock);
    (*entry).scatteredvblock = 0xFFFF;
    ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
    ftl_map[(*entry).logicalvblock] = block;
    return 0;
}
#endif
#ifndef FTL_READONLY
/* Fills the rest of a scattered page block that was actually written
   sequentially until now, in order to be able to save a block erase by
   committing it without needing to copy it again.
   If this fails for whichever reason, it will be committed the usual way.
   Returns 0 on success, 1 if the entry wasn't sequential after all. */
uint32_t ftl_commit_sequential(struct ftl_log_type* entry)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;

    if ((*entry).issequential != 1
     || (*entry).pagescurrent != (*entry).pagesused)
        return 1;

    /* Fill the tail of the scattered block with the still-valid pages
       of the old data block */
    for (; (*entry).pagesused < ppb; )
    {
        uint32_t lpn = (*entry).logicalvblock * ppb + (*entry).pagesused;
        uint32_t newpage = (*entry).scatteredvblock * ppb
                         + (*entry).pagesused;
        uint32_t count = FTL_COPYBUF_SIZE < ppb - (*entry).pagesused
                       ? FTL_COPYBUF_SIZE : ppb - (*entry).pagesused;
        /* A page in this range that was already written scattered would
           break sequential order: fall back to the full commit path */
        for (i = 0; i < count; i++)
            if ((*entry).pageoffsets[(*entry).pagesused + i] != 0xFFFF)
                return ftl_commit_scattered(entry);
        uint32_t rc = ftl_read(lpn, count, ftl_copybuffer[0]);
        memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
        for (i = 0; i < count; i++)
        {
            ftl_copyspare[i].user.lpn = lpn + i;
            ftl_copyspare[i].user.usn = ++ftl_cxt.nextblockusn;
            ftl_copyspare[i].user.type = 0x40;
            if (rc) ftl_copyspare[i].user.eccmark = 0x55;  /* suspect data */
            if ((*entry).pagesused + i == ppb - 1)
                ftl_copyspare[i].user.type = 0x41;  /* end-of-block marker */
        }
        if (ftl_vfl_write(newpage, count, ftl_copybuffer[0], &ftl_copyspare[0]))
            return ftl_commit_scattered(entry);
        (*entry).pagesused += count;
    }
    /* The scattered block has become the block's new data block */
    ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
    ftl_map[(*entry).logicalvblock] = (*entry).scatteredvblock;
    (*entry).scatteredvblock = 0xFFFF;
    return 0;
}
#endif
#ifndef FTL_READONLY
/* If a log entry is supplied, its scattered page block will be removed in
   whatever way seems most appropriate. Else, the oldest scattered page block
   will be freed by committing it. Returns 0 on success, 1 on failure. */
uint32_t ftl_remove_scattered_block(struct ftl_log_type* entry)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t age = 0xFFFFFFFF, used = 0;
    if (entry == (struct ftl_log_type*)0)
    {
        /* No entry supplied: pick the oldest one (lowest usn); among
           equals, prefer the one with the most current pages */
        for (i = 0; i < 0x11; i++)
        {
            if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
            if (ftl_log[i].pagesused == 0 || ftl_log[i].pagescurrent == 0)
                return 1;
            if (ftl_log[i].usn < age
             || (ftl_log[i].usn == age && ftl_log[i].pagescurrent > used))
            {
                age = ftl_log[i].usn;
                used = ftl_log[i].pagescurrent;
                entry = &ftl_log[i];
            }
        }
        if (entry == (struct ftl_log_type*)0) return 1;
    }
    else if ((*entry).pagescurrent < ppb / 2)
    {
        /* Mostly stale pages: compacting is cheaper than committing */
        ftl_cxt.swapcounter++;
        return ftl_compact_scattered(entry);
    }
    ftl_cxt.swapcounter++;
    if ((*entry).issequential == 1) return ftl_commit_sequential(entry);
    else return ftl_commit_scattered(entry);
}
#endif
1672 #ifndef FTL_READONLY
1673 /* Initialize a log entry to the values for an empty scattered page block */
1674 void ftl_init_log_entry(struct ftl_log_type* entry)
1676 (*entry).issequential = 1;
1677 (*entry).pagescurrent = 0;
1678 (*entry).pagesused = 0;
1679 memset((*entry).pageoffsets, 0xFF, 0x400);
1681 #endif
1684 #ifndef FTL_READONLY
1685 /* Allocates a log entry for the specified vBlock,
1686 first making space, if neccessary. */
1687 struct ftl_log_type* ftl_allocate_log_entry(uint32_t block)
1689 uint32_t i;
1690 struct ftl_log_type* entry = ftl_get_log_entry(block);
1691 (*entry).usn = ftl_cxt.nextblockusn - 1;
1692 if (entry != (struct ftl_log_type*)0) return entry;
1694 for (i = 0; i < 0x11; i++)
1696 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1697 if (ftl_log[i].pagesused == 0)
1699 entry = &ftl_log[i];
1700 break;
1704 if (entry == (struct ftl_log_type*)0)
1706 if (ftl_cxt.freecount < 3) panicf("FTL: Detected a pool block leak!");
1707 else if (ftl_cxt.freecount == 3)
1708 if (ftl_remove_scattered_block((struct ftl_log_type*)0) != 0)
1709 return (struct ftl_log_type*)0;
1710 entry = ftl_log;
1711 while ((*entry).scatteredvblock != 0xFFFF) entry = &entry[1];
1712 (*entry).scatteredvblock = ftl_allocate_pool_block();
1713 if ((*entry).scatteredvblock == 0xFFFF)
1714 return (struct ftl_log_type*)0;
1717 ftl_init_log_entry(entry);
1718 (*entry).logicalvblock = block;
1719 (*entry).usn = ftl_cxt.nextblockusn - 1;
1721 return entry;
1723 #endif
#ifndef FTL_READONLY
/* Commits the FTL block map, erase counters, and context to flash.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_commit_cxt(void)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t mappages = ((*ftl_nand_type).userblocks + 0x3ff) >> 10;
    uint32_t ctrpages = ((*ftl_nand_type).userblocks + 23 + 0x3ff) >> 10;
    uint32_t endpage = ftl_cxt.ftlctrlpage + mappages + ctrpages + 1;
    /* If everything won't fit into the current control block anymore,
       skip ahead to its last page so the next call starts a fresh one */
    if (endpage >= (ftl_cxt.ftlctrlpage / ppb + 1) * ppb)
        ftl_cxt.ftlctrlpage |= ppb - 1;
    /* Erase counter pages (type 0x46, written by the helper) */
    for (i = 0; i < ctrpages; i++)
    {
        if (ftl_next_ctrl_pool_page() != 0) return 1;
        if (ftl_save_erasectr_page(i) != 0) return 1;
    }
    /* Block map pages (type 0x44) */
    for (i = 0; i < mappages; i++)
    {
        if (ftl_next_ctrl_pool_page() != 0) return 1;
        memset(&ftl_sparebuffer[0], 0xFF, 0x40);
        ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
        ftl_sparebuffer[0].meta.idx = i;
        ftl_sparebuffer[0].meta.type = 0x44;
        if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_map[i << 10],
                          &ftl_sparebuffer[0]) != 0)
            return 1;
        ftl_cxt.ftl_map_pages[i] = ftl_cxt.ftlctrlpage;
    }
    /* Finally the context itself (type 0x43), with the clean flag set */
    if (ftl_next_ctrl_pool_page() != 0) return 1;
    ftl_cxt.clean_flag = 1;
    memset(&ftl_sparebuffer[0], 0xFF, 0x40);
    ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
    ftl_sparebuffer[0].meta.type = 0x43;
    if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_cxt, &ftl_sparebuffer[0]) != 0)
        return 1;
    return 0;
}
#endif
1766 #ifndef FTL_READONLY
1767 /* Swaps the most and least worn block on the flash,
1768 to better distribute wear. It will refuse to do anything
1769 if the wear spread is lower than 5 erases. */
1770 uint32_t ftl_swap_blocks(void)
1772 uint32_t i;
1773 uint32_t min = 0xFFFFFFFF, max = 0, maxidx = 0x14;
1774 uint32_t minidx = 0, minvb = 0, maxvb = 0;
1775 for (i = 0; i < ftl_cxt.freecount; i++)
1777 uint32_t idx = ftl_cxt.nextfreeidx + i;
1778 if (idx >= 0x14) idx -= 0x14;
1779 if (ftl_erasectr[ftl_cxt.blockpool[idx]] > max)
1781 maxidx = idx;
1782 maxvb = ftl_cxt.blockpool[idx];
1783 max = ftl_erasectr[maxidx];
1786 if (maxidx == 0x14) return 0;
1787 for (i = 0; i < (*ftl_nand_type).userblocks; i++)
1789 if (ftl_erasectr[ftl_map[i]] > max) max = ftl_erasectr[ftl_map[i]];
1790 if (ftl_get_log_entry(i) != (struct ftl_log_type*)0) continue;
1791 if (ftl_erasectr[ftl_map[i]] < min)
1793 minidx = i;
1794 minvb = ftl_map[i];
1795 min = ftl_erasectr[minidx];
1798 if (max - min < 5) return 0;
1799 if (minvb == maxvb) return 0;
1800 if (ftl_erase_block(maxvb) != 0) return 1;
1801 if (ftl_copy_block(minidx, maxvb) != 0) return 1;
1802 ftl_cxt.blockpool[maxidx] = minvb;
1803 ftl_map[minidx] = maxvb;
1804 return 0;
1806 #endif
#ifndef FTL_READONLY
/* Exposed function: Write highlevel sectors.
   sector/count are in 2KB logical pages. Clears the clean flag on the
   first write after a sync, writes whole aligned blocks directly and
   everything else through scattered page (log) blocks, then performs
   housekeeping (wear leveling, dirty erase counter pages).
   Returns 0 on success, 1 on failure. */
uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
{
    uint32_t i, j, k;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;

    if (sector + count > (*ftl_nand_type).userblocks * ppb)
        return 1;

    if (count == 0) return 0;

    mutex_lock(&ftl_mtx);

    /* First write since the last sync: persist an "unclean" marker page
       (type 0x47) so an interrupted session can be detected at mount */
    if (ftl_cxt.clean_flag == 1)
    {
        for (i = 0; i < 3; i++)
        {
            if (ftl_next_ctrl_pool_page() != 0)
            {
                mutex_unlock(&ftl_mtx);
                return 1;
            }
            memset(ftl_buffer, 0xFF, 0x800);
            memset(&ftl_sparebuffer[0], 0xFF, 0x40);
            ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
            ftl_sparebuffer[0].meta.type = 0x47;
            if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, ftl_buffer,
                              &ftl_sparebuffer[0]) == 0)
                break;
        }
        if (i == 3)  /* all three attempts failed */
        {
            mutex_unlock(&ftl_mtx);
            return 1;
        }
        ftl_cxt.clean_flag = 0;
    }

    for (i = 0; i < count; )
    {
        uint32_t block = (sector + i) / ppb;
        uint32_t page = (sector + i) % ppb;

        struct ftl_log_type* logentry = ftl_allocate_log_entry(block);
        if (logentry == (struct ftl_log_type*)0)
        {
            mutex_unlock(&ftl_mtx);
            return 1;
        }
        if (page == 0 && count - i >= ppb)
        {
            /* Whole-block write: bypass the log, write a complete
               replacement block and swap it into the map */
            uint32_t vblock = (*logentry).scatteredvblock;
            (*logentry).scatteredvblock = 0xFFFF;
            if ((*logentry).pagesused != 0)
            {
                /* The log block already contains data: get a fresh one */
                ftl_release_pool_block(vblock);
                vblock = ftl_allocate_pool_block();
                if (vblock == 0xFFFFFFFF)
                {
                    mutex_unlock(&ftl_mtx);
                    return 1;
                }
            }
            ftl_cxt.nextblockusn++;
            for (j = 0; j < ppb; j += FTL_WRITESPARE_SIZE)
            {
                memset(&ftl_sparebuffer[0], 0xFF, 0x40 * FTL_WRITESPARE_SIZE);
                for (k = 0; k < FTL_WRITESPARE_SIZE; k++)
                {
                    ftl_sparebuffer[k].user.lpn = sector + i + j + k;
                    ftl_sparebuffer[k].user.usn = ftl_cxt.nextblockusn;
                    ftl_sparebuffer[k].user.type = 0x40;
                    /* NOTE(review): j advances by FTL_WRITESPARE_SIZE,
                       so j == ppb - 1 never holds when ppb is a multiple
                       of it; j + k == ppb - 1 looks intended for the
                       end-of-block marker. Confirm before changing. */
                    if (j == ppb - 1) ftl_sparebuffer[k].user.type = 0x41;
                }
                uint32_t rc = ftl_vfl_write(vblock * ppb + j, FTL_WRITESPARE_SIZE,
                                            &((uint8_t*)buffer)[(i + j) << 11],
                                            &ftl_sparebuffer[0]);
                if (rc)
                    for (k = 0; k < ftl_banks; k++)
                        if (rc & (1 << k))
                        {
                            /* Retry each failed bank's page one by one
                               until the write sticks */
                            while (ftl_vfl_write(vblock * ppb + j + k, 1,
                                       &((uint8_t*)buffer)[(i + j + k) << 11],
                                       &ftl_sparebuffer[k]));
                        }
            }
            ftl_release_pool_block(ftl_map[block]);
            ftl_map[block] = vblock;
            i += ppb;
        }
        else
        {
            /* Partial write: append the pages to this block's
               scattered page block */
            if ((*logentry).pagesused == ppb)
            {
                /* Log block is full: commit it and start a fresh one */
                ftl_remove_scattered_block(logentry);
                logentry = ftl_allocate_log_entry(block);
                if (logentry == (struct ftl_log_type*)0)
                {
                    mutex_unlock(&ftl_mtx);
                    return 1;
                }
            }
            /* Clamp the chunk to the request, the log block's free
               space, and the end of the logical block */
            uint32_t cnt = FTL_WRITESPARE_SIZE;
            if (cnt > count - i) cnt = count - i;
            if (cnt > ppb - (*logentry).pagesused) cnt = ppb - (*logentry).pagesused;
            if (cnt > ppb - page) cnt = ppb - page;
            memset(&ftl_sparebuffer[0], 0xFF, 0x40 * cnt);
            for (j = 0; j < cnt; j++)
            {
                ftl_sparebuffer[j].user.lpn = sector + i + j;
                ftl_sparebuffer[j].user.usn = ++ftl_cxt.nextblockusn;
                ftl_sparebuffer[j].user.type = 0x40;
                if ((*logentry).pagesused + j == ppb - 1 && (*logentry).issequential)
                    ftl_sparebuffer[j].user.type = 0x41;
            }
            uint32_t abspage = (*logentry).scatteredvblock * ppb
                             + (*logentry).pagesused;
            (*logentry).pagesused += cnt;
            if (ftl_vfl_write(abspage, cnt, &((uint8_t*)buffer)[i << 11],
                              &ftl_sparebuffer[0]) == 0)
            {
                /* Update the offset table and keep the sequential flag
                   honest for each page just written */
                for (j = 0; j < cnt; j++)
                {
                    if ((*logentry).pageoffsets[page + j] == 0xFFFF)
                        (*logentry).pagescurrent++;
                    (*logentry).pageoffsets[page + j] = (*logentry).pagesused - cnt + j;
                    if ((*logentry).pagesused - cnt + j + 1 != (*logentry).pagescurrent
                     || (*logentry).pageoffsets[page + j] != page + j)
                        (*logentry).issequential = 0;
                }
                i += cnt;
            }
            else panicf("FTL: Write error: %u %u %u!",
                        (unsigned)sector, (unsigned)count, (unsigned)i);
        }
        if ((*logentry).pagesused == ppb) ftl_remove_scattered_block(logentry);
    }
    /* Housekeeping: occasional wear leveling */
    if (ftl_cxt.swapcounter >= 300)
    {
        ftl_cxt.swapcounter -= 20;
        for (i = 0; i < 4; i++) if (ftl_swap_blocks() == 0) break;
    }
    /* Flush erase counter pages that have become too dirty */
    if (ftl_cxt.erasedirty == 1)
    {
        ftl_cxt.erasedirty = 0;
        for (i = 0; i < 8; i++)
            if (ftl_erasectr_dirt[i] >= 100)
            {
                ftl_next_ctrl_pool_page();
                ftl_save_erasectr_page(i);
            }
    }

    mutex_unlock(&ftl_mtx);
    return 0;
}
#endif
#ifndef FTL_READONLY
/* Exposed function: Performes a sync / unmount,
   i.e. commits all scattered page blocks,
   distributes wear, and commits the FTL context.
   Returns 0 on success (or if already clean), 1 on failure. */
uint32_t ftl_sync(void)
{
    uint32_t i;
    uint32_t rc = 0;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    if (ftl_cxt.clean_flag == 1) return 0;  /* nothing to flush */

    mutex_lock(&ftl_mtx);

    /* Do some wear leveling first if write activity warrants it */
    if (ftl_cxt.swapcounter >= 20)
        for (i = 0; i < 4; i++)
            if (ftl_swap_blocks() == 0)
            {
                ftl_cxt.swapcounter -= 20;
                break;
            }
    /* Commit every active scattered page block */
    for (i = 0; i < 0x11; i++)
    {
        if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
        ftl_cxt.nextblockusn++;
        if (ftl_log[i].issequential == 1)
            rc |= ftl_commit_sequential(&ftl_log[i]);
        else rc |= ftl_commit_scattered(&ftl_log[i]);
    }
    if (rc != 0)
    {
        mutex_unlock(&ftl_mtx);
        return 1;
    }
    /* Try to commit the context up to five times, moving on to a fresh
       control block after each failed attempt */
    for (i = 0; i < 5; i++)
        if (ftl_commit_cxt() == 0)
        {
            mutex_unlock(&ftl_mtx);
            return 0;
        }
        else ftl_cxt.ftlctrlpage |= ppb - 1;
    mutex_unlock(&ftl_mtx);
    return 1;
}
#endif
/* Initializes and mounts the FTL.
   As long as nothing was written, you won't need to unmount it.
   Before shutting down after writing something, call ftl_sync(),
   which will just do nothing if everything was already clean.
   Returns 0 on success; most failure modes panic instead. */
uint32_t ftl_init(void)
{
    mutex_init(&ftl_mtx);
    uint32_t i;
    uint32_t result = 0;
    /* repaired and skip are assigned but not used in this function */
    uint32_t foundsignature, founddevinfo, blockwiped, repaired, skip;
    if (nand_device_init() != 0) //return 1;
        panicf("FTL: Lowlevel NAND driver init failed!");
    /* Count the NAND banks that are actually present */
    ftl_banks = 0;
    for (i = 0; i < 4; i++)
        if (nand_get_device_type(i) != 0) ftl_banks = i + 1;
    ftl_nand_type = nand_get_device_type(0);
    foundsignature = 0;
    blockwiped = 1;
    mutex_unlock(&ftl_mtx);
    /* Look for the whole-flash signature word 0x41303034 in block zero */
    for (i = 0; i < (*ftl_nand_type).pagesperblock; i++)
    {
        result = nand_read_page(0, i, ftl_buffer, (uint32_t*)0, 1, 1);
        if ((result & 0x11F) == 0)
        {
            blockwiped = 0;
            if (((uint32_t*)ftl_buffer)[0] != 0x41303034) continue;
            foundsignature = 1;
            break;
        }
        else if ((result & 2) != 2) blockwiped = 0;
    }

    founddevinfo = ftl_has_devinfo();

    repaired = 0;
    skip = 0;
    if (founddevinfo == 0)
    {
        mutex_unlock(&ftl_mtx);
        panicf("FTL: No DEVICEINFO found!");
        //return 1;
    }
    if (foundsignature != 0 && (result & 0x11F) != 0)
    {
        mutex_unlock(&ftl_mtx);
        panicf("FTL: Problem with the signature!");
        //return 1;
    }
    /* Mount the VFL, then the FTL on top of it */
    if (ftl_vfl_open() == 0)
        if (ftl_open() == 0)
        {
            mutex_unlock(&ftl_mtx);
            return 0;
        }

    panicf("FTL: Initialization failed!");

    /* Something went terribly wrong. We may want to allow the user to erase
       block zero in that condition, to make norboot reinitialize the FTL.
       (However there is curently no point in this, as iLoader would already
       fail if this would be the case.)

    nand_block_erase(0, 0);
    */

    mutex_unlock(&ftl_mtx);
    return 1;
}