firmware/target/arm/s5l8700/ipodnano2g/ftl-nano2g.c
1 /***************************************************************************
2 *             __________               __   ___.
3 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
4 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
5 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
6 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
7 *                      \/            \/     \/    \/            \/
8 * $Id$
10 * Copyright (C) 2009 by Michael Sparmann
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
24 #include <config.h>
25 #include <cpu.h>
26 #include <nand-target.h>
27 #include <ftl-target.h>
28 #include <string.h>
29 #include "kernel.h"
30 #include "panic.h"
34 #define FTL_COPYBUF_SIZE 32
35 #define FTL_WRITESPARE_SIZE 32
36 //#define FTL_FORCEMOUNT
40 #ifdef FTL_FORCEMOUNT
41 #ifndef FTL_READONLY
42 #define FTL_READONLY
43 #endif
44 #endif
47 #ifdef FTL_READONLY
48 uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
50 (void)sector;
51 (void)count;
52 (void)buffer;
53 return -1;
55 uint32_t ftl_sync(void)
57 return 0;
59 #endif
63 /* Keeps the state of a scattered page block.
64 This structure is used in memory only, not on flash,
65 but it equals the one the OFW uses. */
66 struct ftl_log_type
69 /* The ftl_cxt.nextblockusn at the time the block was allocated,
70 needed in order to be able to remove the oldest ones first. */
71 uint32_t usn;
73 /* The vBlock number at which the scattered pages are stored */
74 uint16_t scatteredvblock;
76 /* the lBlock number for which those pages are */
77 uint16_t logicalvblock;
79 /* Pointer to ftl_offsets, contains the mapping which lPage is
80 currently stored at which scattered vPage. */
81 uint16_t* pageoffsets;
83 /* Pages used in the vBlock, i.e. next page number to be written */
84 uint16_t pagesused;
86 /* Pages that are still up to date in this block, i.e. need to be
87 moved when this vBlock is deallocated. */
88 uint16_t pagescurrent;
90 /* A flag indicating whether all pages are still sequential in this block.
91 Initialized to 1 on allocation, zeroed as soon as anything is
92 written out of sequence, so that the block will need copying
93 when committing to get the pages back into the right order.
94 This is used to halve the number of block erases needed when
95 writing huge amounts of sequential data. */
96 uint32_t issequential;
98 } __attribute__((packed));
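/* Worked example of how these fields interact (derived from the read and
   write paths further down): if lPage 5 of logicalvblock was last written
   to the third page of scatteredvblock, then pageoffsets[5] == 2,
   pagesused is at least 3, and a read of that lPage is redirected to
   scatteredvblock * ppb + 2 instead of the page inside ftl_map[].
   Entries that were never written stay at 0xFFFF. */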
101 /* Keeps the state of the FTL, both on flash and in memory */
102 struct ftl_cxt_type
105 /* Update sequence number of the FTL context, decremented
106 every time a new revision of FTL meta data is written. */
107 uint32_t usn;
109 /* Update sequence number for user data blocks. Incremented
110 every time a portion of user pages is written, so that
111 a consistency check can determine which copy of a user
112 page is the most recent one. */
113 uint32_t nextblockusn;
115 /* Count of currently free pages in the block pool */
116 uint16_t freecount;
118 /* Index to the first free hyperblock in the blockpool ring buffer */
119 uint16_t nextfreeidx;
121 /* This is a counter that is used to better distribute block
122 wear. It is incremented on every block erase, and if it
123 gets too high (300 on writes, 20 on sync), the most and
124 least worn hyperblock will be swapped (causing an additional
125 block write) and the counter will be decreased by 20. */
126 uint16_t swapcounter;
128 /* Ring buffer of currently free hyperblocks. nextfreeidx is the
129 index to freecount free ones, the other ones are currently
130 allocated for scattered page hyperblocks. */
131 uint16_t blockpool[0x14];
133 /* Alignment to 32 bits */
134 uint16_t field_36;
136 /* vPages where the block map is stored */
137 uint32_t ftl_map_pages[8];
139 /* Probably additional map page number space for bigger chips */
140 uint8_t field_58[0x28];
142 /* vPages where the erase counters are stored */
143 uint32_t ftl_erasectr_pages[8];
145 /* Seems to be padding */
146 uint8_t field_A0[0x70];
148 /* Pointer to ftl_map used by Whimory, not used by us */
149 uint32_t ftl_map_ptr;
151 /* Pointer to ftl_erasectr used by Whimory, not used by us */
152 uint32_t ftl_erasectr_ptr;
154 /* Pointer to ftl_log used by Whimory, not used by us */
155 uint32_t ftl_log_ptr;
157 /* Flag used to indicate that some erase counter pages should be committed
158 because they were changed more than 100 times since the last commit. */
159 uint32_t erasedirty;
161 /* Seems to be unused */
162 uint16_t field_120;
164 /* vBlocks used to store the FTL context, map, and erase
165 counter pages. This is also a ring buffer, and the oldest
166 page gets swapped with the least used page from the block
167 pool ring buffer when a new one is allocated. */
168 uint16_t ftlctrlblocks[3];
170 /* The last used vPage number from ftlctrlblocks */
171 uint32_t ftlctrlpage;
173 /* Set on context sync, reset on write, so obviously never
174 zero in the context written to the flash */
175 uint32_t clean_flag;
177 /* Seems to be unused, but gets loaded from flash by Whimory. */
178 uint8_t field_130[0x15C];
180 } __attribute__((packed)) FTLCxtType;
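/* Sketch of how the block pool fields above are used (see
   ftl_allocate_pool_block / ftl_release_pool_block below): blockpool is
   treated as a ring of 0x14 entries. The freecount entries starting at
   nextfreeidx hold free vBlocks; allocation picks the least worn of them,
   swaps it into the nextfreeidx slot, advances nextfreeidx and decrements
   freecount, while releasing a block appends it at
   (nextfreeidx + freecount) % 0x14. */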
183 /* Keeps the state of the bank's VFL, both on flash and in memory.
184 There is one of these per bank. */
185 typedef struct ftl_vfl_cxt_type
188 /* Cross-bank update sequence number, incremented on every VFL
189 context commit on any bank. */
190 uint32_t usn;
192 /* See ftl_cxt.ftlctrlblocks. This is stored to the VFL contexts
193 in order to be able to find the most recent FTL context copy
194 when mounting the FTL. The VFL context number this will be
195 written to on an FTL context commit is chosen semi-randomly. */
196 uint16_t ftlctrlblocks[3];
198 /* Alignment to 32 bits */
199 uint8_t field_A[2];
201 /* Decrementing update counter for VFL context commits per bank */
202 uint32_t updatecount;
204 /* Number of the currently active VFL context block, it's an index
205 into vflcxtblocks. */
206 uint16_t activecxtblock;
208 /* Number of the first free page in the active VFL context block */
209 uint16_t nextcxtpage;
211 /* Seems to be unused */
212 uint8_t field_14[4];
214 /* Incremented every time a block erase error leads to a remap,
215 but doesn't seem to be read anywhere. */
216 uint16_t field_18;
218 /* Number of spare blocks used */
219 uint16_t spareused;
221 /* pBlock number of the first spare block */
222 uint16_t firstspare;
224 /* Total number of spare blocks */
225 uint16_t sparecount;
227 /* Block remap table. Contains the vBlock number the n-th spare
228 block is used as a replacement for. 0 = unused, 0xFFFF = bad. */
229 uint16_t remaptable[0x334];
231 /* Bad block table. Each bit represents 8 blocks. 1 = OK, 0 = Bad.
232 If the entry is zero, you should look at the remap table to see
233 if the block is remapped, and if yes, where the replacement is. */
234 uint8_t bbt[0x11A];
236 /* pBlock numbers used to store the VFL context. This is a ring
237 buffer. On a VFL context write, always 8 pages are written,
238 and it passes if at least 4 of them can be read back. */
239 uint16_t vflcxtblocks[4];
241 /* Blocks scheduled for remapping are stored at the end of the
242 remap table. This is the first index used for them. */
243 uint16_t scheduledstart;
245 /* Probably padding */
246 uint8_t field_7AC[0x4C];
248 /* First checksum (addition) */
249 uint32_t checksum1;
251 /* Second checksum (XOR), there is a bug in whimory regarding this. */
252 uint32_t checksum2;
254 } __attribute__((packed)) FTLVFLCxtType;
257 /* Layout of the spare bytes of each page on the flash */
258 union ftl_spare_data_type
261 /* The layout used for actual user data (types 0x40 and 0x41) */
262 struct ftl_spare_data_user_type
265 /* The lPage, i.e. Sector, number */
266 uint32_t lpn;
268 /* The update sequence number of that page,
269 copied from ftl_cxt.nextblockusn on write */
270 uint32_t usn;
272 /* Seems to be unused */
273 uint8_t field_8;
275 /* Type field, 0x40 (data page) or 0x41
276 (last data page of hyperblock) */
277 uint8_t type;
279 /* ECC mark, usually 0xFF. If an error occurred while reading the
280 page during a copying operation earlier, this will be 0x55. */
281 uint8_t eccmark;
283 /* Seems to be unused */
284 uint8_t field_B;
286 /* ECC data for the user data */
287 uint8_t dataecc[0x28];
289 /* ECC data for the first 0xC bytes above */
290 uint8_t spareecc[0xC];
292 } __attribute__((packed)) user;
294 /* The layout used for meta data (other types) */
295 struct ftl_spare_data_meta_type
298 /* ftl_cxt.usn for FTL stuff, ftl_vfl_cxt.updatecount for VFL stuff */
299 uint32_t usn;
301 /* Index of the thing inside the page,
302 for example number / index of the map or erase counter page */
303 uint16_t idx;
305 /* Seems to be unused */
306 uint8_t field_6;
308 /* Seems to be unused */
309 uint8_t field_7;
311 /* Seems to be unused */
312 uint8_t field_8;
314 /* Type field:
315 0x43: FTL context page
316 0x44: Block map page
317 0x46: Erase counter page
318 0x47: "FTL is currently mounted" (i.e. unclean shutdown) mark
319 0x80: VFL context page */
320 uint8_t type;
322 /* ECC mark, usually 0xFF. If an error occurred while reading the
323 page during a copying operation earlier, this will be 0x55. */
324 uint8_t eccmark;
326 /* Seems to be unused */
327 uint8_t field_B;
329 /* ECC data for the user data */
330 uint8_t dataecc[0x28];
332 /* ECC data for the first 0xC bytes above */
333 uint8_t spareecc[0xC];
335 } __attribute__((packed)) meta;
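/* Both layouts above add up to 0x40 bytes (12 header bytes, 0x28 bytes of
   data ECC, 0xC bytes of spare ECC), which is why the code below
   initializes a spare entry with memset(..., 0xFF, 0x40) and steps through
   multi-page spare buffers in 0x40-byte increments. */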
340 /* Keeps track of troublesome blocks, only in memory, lost on unmount. */
341 struct ftl_trouble_type
344 /* vBlock number of the block giving trouble */
345 uint16_t block;
347 /* Bank of the block giving trouble */
348 uint8_t bank;
350 /* Error counter, incremented by 3 on error, decremented by 1 on erase,
351 remapping will be done when it reaches 6. */
352 uint8_t errors;
354 } __attribute__((packed));
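/* Worked example for the error counter above (see ftl_vfl_log_trouble and
   ftl_vfl_log_success below): a block enters the log with errors == 3,
   every further error adds 3 and every successful erase subtracts 1, and a
   remap is scheduled once the counter exceeds 5. Two errors with no
   successful erase in between (3, then 6) are therefore enough to trigger
   a remap. */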
358 /* Pointer to an info structure regarding the flash type used */
359 const struct nand_device_info_type* ftl_nand_type;
361 /* Number of banks we detected a chip on */
362 uint32_t ftl_banks;
364 /* Block map, used for lBlock to vBlock mapping */
365 uint16_t ftl_map[0x2000];
367 /* VFL context for each bank */
368 struct ftl_vfl_cxt_type ftl_vfl_cxt[4];
370 /* FTL context */
371 struct ftl_cxt_type ftl_cxt;
373 /* Temporary data buffers for internal use by the FTL */
374 uint8_t ftl_buffer[0x800] __attribute__((aligned(16)));
376 /* Temporary spare byte buffer for internal use by the FTL */
377 union ftl_spare_data_type ftl_sparebuffer[FTL_WRITESPARE_SIZE] __attribute__((aligned(16)));
380 #ifndef FTL_READONLY
382 /* Lowlevel BBT for each bank */
383 uint8_t ftl_bbt[4][0x410];
385 /* Erase counters for the vBlocks */
386 uint16_t ftl_erasectr[0x2000];
388 /* Used by ftl_log */
389 uint16_t ftl_offsets[0x11][0x200];
391 /* Structs keeping record of scattered page blocks */
392 struct ftl_log_type ftl_log[0x11];
394 /* Global cross-bank update sequence number of the VFL context */
395 uint32_t ftl_vfl_usn;
397 /* Keeps track (temporarily) of troublesome blocks */
398 struct ftl_trouble_type ftl_troublelog[5];
400 /* Counts erase counter page changes, after 100 of them the affected
401 page will be committed to the flash. */
402 uint8_t ftl_erasectr_dirt[8];
404 /* Buffer needed for copying pages around while moving or committing blocks.
405 This can't be shared with ftl_buffer, because this one could be overwritten
406 during the copying operation in order to e.g. commit a CXT. */
407 uint8_t ftl_copybuffer[FTL_COPYBUF_SIZE][0x800] __attribute__((aligned(16)));
408 union ftl_spare_data_type ftl_copyspare[FTL_COPYBUF_SIZE] __attribute__((aligned(16)));
410 /* Needed to store the old scattered page offsets in order to be able to roll
411 back if something fails while compacting a scattered page block. */
412 uint16_t ftl_offsets_backup[0x200] __attribute__((aligned(16)));
414 #endif
417 static struct mutex ftl_mtx;
421 /* Finds a device info page for the specified bank and returns its number.
422 Used to check if one is present, and to read the lowlevel BBT. */
423 uint32_t ftl_find_devinfo(uint32_t bank)
425 /* Scan the last 10% of the flash for device info pages */
426 uint32_t lowestBlock = ftl_nand_type->blocks
427 - (ftl_nand_type->blocks / 10);
428 uint32_t block, page, pagenum;
429 for (block = ftl_nand_type->blocks - 1; block >= lowestBlock; block--)
431 page = ftl_nand_type->pagesperblock - 8;
432 for (; page < ftl_nand_type->pagesperblock; page++)
434 pagenum = block * ftl_nand_type->pagesperblock + page;
435 if ((nand_read_page(bank, pagenum, ftl_buffer,
436 &ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
437 continue;
438 if (memcmp(ftl_buffer, "DEVICEINFOSIGN\0", 0x10) == 0)
439 return pagenum;
442 return 0;
446 /* Checks if all banks have proper device info pages */
447 uint32_t ftl_has_devinfo(void)
449 uint32_t i;
450 for (i = 0; i < ftl_banks; i++) if (ftl_find_devinfo(i) == 0) return 0;
451 return 1;
455 /* Loads the lowlevel BBT for a bank to the specified buffer.
456 This is based on some cryptic disassembly and not fully understood yet. */
457 uint32_t ftl_load_bbt(uint32_t bank, uint8_t* bbt)
459 uint32_t i, j;
460 uint32_t pagebase, page = ftl_find_devinfo(bank), page2;
461 uint32_t unk1, unk2, unk3;
462 if (page == 0) return 1;
463 pagebase = page & ~(ftl_nand_type->pagesperblock - 1);
464 if ((nand_read_page(bank, page, ftl_buffer,
465 (uint32_t*)0, 1, 0) & 0x11F) != 0) return 1;
466 if (memcmp(&ftl_buffer[0x18], "BBT", 4) != 0) return 1;
467 unk1 = ((uint16_t*)ftl_buffer)[0x10];
468 unk2 = ((uint16_t*)ftl_buffer)[0x11];
469 unk3 = ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 6 + 10]
470 + ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 6 + 11];
471 for (i = 0; i < unk1; i++)
473 for (j = 0; ; j++)
475 page2 = unk2 + i + unk3 * j;
476 if (page2 >= (uint32_t)(ftl_nand_type->pagesperblock - 8))
477 break;
478 if ((nand_read_page(bank, pagebase + page2, ftl_buffer,
479 (void*)0, 1, 0) & 0x11F) == 0)
481 memcpy(bbt, ftl_buffer, 0x410);
482 return 0;
486 return 1;
490 /* Calculates the checksums for the VFL context page of the specified bank */
491 void ftl_vfl_calculate_checksum(uint32_t bank,
492 uint32_t* checksum1, uint32_t* checksum2)
494 uint32_t i;
495 *checksum1 = 0xAABBCCDD;
496 *checksum2 = 0xAABBCCDD;
497 for (i = 0; i < 0x1FE; i++)
499 *checksum1 += ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
500 *checksum2 ^= ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
505 /* Checks if the checksums of the VFL context
506 of the specified bank are correct */
507 uint32_t ftl_vfl_verify_checksum(uint32_t bank)
509 uint32_t checksum1, checksum2;
510 ftl_vfl_calculate_checksum(bank, &checksum1, &checksum2);
511 if (checksum1 == ftl_vfl_cxt[bank].checksum1) return 0;
512 /* The following line is pretty obviously a bug in Whimory,
513 but we do it the same way for compatibility. */
514 if (checksum2 != ftl_vfl_cxt[bank].checksum2) return 0;
515 panicf("FTL: Bad VFL CXT checksum!");
516 return 1;
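/* Note on the check above: because the checksum2 comparison is inverted, a
   mismatch of checksum2 alone is silently accepted, and the panic is only
   reached when checksum1 is wrong while checksum2 happens to match. This
   mirrors the original Whimory behaviour on purpose; it is not how two
   checksums would normally be verified. */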
520 #ifndef FTL_READONLY
521 /* Updates the checksums of the VFL context of the specified bank */
522 void ftl_vfl_update_checksum(uint32_t bank)
524 ftl_vfl_calculate_checksum(bank, &ftl_vfl_cxt[bank].checksum1,
525 &ftl_vfl_cxt[bank].checksum2);
527 #endif
530 #ifndef FTL_READONLY
531 /* Writes 8 copies of the VFL context of the specified bank to flash,
532 and succeeds if at least 4 can be read back properly. */
533 uint32_t ftl_vfl_store_cxt(uint32_t bank)
535 uint32_t i;
536 ftl_vfl_cxt[bank].updatecount--;
537 ftl_vfl_cxt[bank].usn = ++ftl_vfl_usn;
538 ftl_vfl_cxt[bank].nextcxtpage += 8;
539 ftl_vfl_update_checksum(bank);
540 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
541 ftl_sparebuffer[0].meta.usn = ftl_vfl_cxt[bank].updatecount;
542 ftl_sparebuffer[0].meta.field_8 = 0;
543 ftl_sparebuffer[0].meta.type = 0x80;
544 for (i = 1; i <= 8; i++)
546 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
547 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
548 uint32_t page = block * ftl_nand_type->pagesperblock;
549 page += ftl_vfl_cxt[bank].nextcxtpage - i;
550 nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer[0], 1);
552 uint32_t good = 0;
553 for (i = 1; i <= 8; i++)
555 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
556 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
557 uint32_t page = block * ftl_nand_type->pagesperblock;
558 page += ftl_vfl_cxt[bank].nextcxtpage - i;
559 if ((nand_read_page(bank, page, ftl_buffer,
560 &ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
561 continue;
562 if (memcmp(ftl_buffer, &ftl_vfl_cxt[bank], 0x7AC) != 0)
563 continue;
564 if (ftl_sparebuffer[0].meta.usn != ftl_vfl_cxt[bank].updatecount)
565 continue;
566 if (ftl_sparebuffer[0].meta.field_8 == 0
567 && ftl_sparebuffer[0].meta.type == 0x80) good++;
569 return good > 3 ? 0 : 1;
571 #endif
574 #ifndef FTL_READONLY
575 /* Commits the VFL context of the specified bank to flash,
576 retries until it works or all available pages have been tried */
577 uint32_t ftl_vfl_commit_cxt(uint32_t bank)
579 if (ftl_vfl_cxt[bank].nextcxtpage + 8 <= ftl_nand_type->pagesperblock)
580 if (ftl_vfl_store_cxt(bank) == 0) return 0;
581 uint32_t current = ftl_vfl_cxt[bank].activecxtblock;
582 uint32_t i = current, j;
583 while (1)
585 i = (i + 1) & 3;
586 if (i == current) break;
587 if (ftl_vfl_cxt[bank].vflcxtblocks[i] == 0xFFFF) continue;
588 for (j = 0; j < 4; j++)
589 if (nand_block_erase(bank, ftl_vfl_cxt[bank].vflcxtblocks[i]
590 * ftl_nand_type->pagesperblock) == 0)
591 break;
592 if (j == 4) continue;
593 ftl_vfl_cxt[bank].activecxtblock = i;
594 ftl_vfl_cxt[bank].nextcxtpage = 0;
595 if (ftl_vfl_store_cxt(bank) == 0) return 0;
597 panicf("FTL: Failed to commit VFL CXT!");
598 return 1;
600 #endif
603 /* Returns a pointer to the most recently updated VFL context,
604 used to find out the current FTL context vBlock numbers
605 (planetbeing's "maxthing") */
606 struct ftl_vfl_cxt_type* ftl_vfl_get_newest_cxt(void)
608 uint32_t i, maxusn;
609 struct ftl_vfl_cxt_type* cxt = (struct ftl_vfl_cxt_type*)0;
610 maxusn = 0;
611 for (i = 0; i < ftl_banks; i++)
612 if (ftl_vfl_cxt[i].usn >= maxusn)
614 cxt = &ftl_vfl_cxt[i];
615 maxusn = ftl_vfl_cxt[i].usn;
617 return cxt;
621 /* Checks if the specified pBlock is marked bad in the supplied lowlevel BBT.
622 Only used while mounting the VFL. */
623 uint32_t ftl_is_good_block(uint8_t* bbt, uint32_t block)
625 if ((bbt[block >> 3] & (1 << (block & 7))) == 0) return 0;
626 else return 1;
630 /* Checks whether the specified vBlock is marked good in the bank's VFL bad block table */
631 uint32_t ftl_vfl_is_good_block(uint32_t bank, uint32_t block)
633 uint8_t bbtentry = ftl_vfl_cxt[bank].bbt[block >> 6];
634 if ((bbtentry & (1 << ((7 - (block >> 3)) & 7))) == 0) return 0;
635 else return 1;
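/* The two bad block tables in this file use different layouts. The lowlevel
   BBT checked by ftl_is_good_block above is a plain bitmap: block n maps to
   bbt[n >> 3], bit (n & 7). The VFL context BBT checked by
   ftl_vfl_is_good_block covers 8 blocks per bit and 64 per byte, with the
   bit order reversed within each byte: vBlock 100, for example, maps to
   bbt[100 >> 6] == bbt[1], bit (7 - (100 >> 3)) & 7 == 3. In both tables a
   set bit means "good". */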
639 #ifndef FTL_READONLY
640 /* Marks the specified vBlock as good or bad
641 in the specified bank's VFL context */
642 void ftl_vfl_set_good_block(uint32_t bank, uint32_t block, uint32_t isgood)
644 uint8_t bit = (1 << ((7 - (block >> 3)) & 7));
645 if (isgood == 1) ftl_vfl_cxt[bank].bbt[block >> 6] |= bit;
646 else ftl_vfl_cxt[bank].bbt[block >> 6] &= ~bit;
648 #endif
651 /* Tries to read a VFL context from the specified bank, pBlock and page */
652 uint32_t ftl_vfl_read_page(uint32_t bank, uint32_t block,
653 uint32_t startpage, void* databuffer,
654 union ftl_spare_data_type* sparebuffer)
656 uint32_t i;
657 for (i = 0; i < 8; i++)
659 uint32_t page = block * ftl_nand_type->pagesperblock
660 + startpage + i;
661 if ((nand_read_page(bank, page, databuffer,
662 sparebuffer, 1, 1) & 0x11F) == 0)
663 if (sparebuffer->meta.field_8 == 0
664 && sparebuffer->meta.type == 0x80)
665 return 0;
667 return 1;
671 /* Translates a bank and vBlock to a pBlock, following remaps */
672 uint32_t ftl_vfl_get_physical_block(uint32_t bank, uint32_t block)
674 if (ftl_vfl_is_good_block(bank, block) == 1) return block;
676 uint32_t spareindex;
677 uint32_t spareused = ftl_vfl_cxt[bank].spareused;
678 for (spareindex = 0; spareindex < spareused; spareindex++)
679 if (ftl_vfl_cxt[bank].remaptable[spareindex] == block)
680 return ftl_vfl_cxt[bank].firstspare + spareindex;
681 return block;
685 #ifndef FTL_READONLY
686 /* Checks if remapping is scheduled for the specified bank and vBlock */
687 uint32_t ftl_vfl_check_remap_scheduled(uint32_t bank, uint32_t block)
689 uint32_t i;
690 for (i = 0x333; i > 0 && i > ftl_vfl_cxt[bank].scheduledstart; i--)
691 if (ftl_vfl_cxt[bank].remaptable[i] == block) return 1;
692 return 0;
694 #endif
697 #ifndef FTL_READONLY
698 /* Schedules remapping for the specified bank and vBlock */
699 void ftl_vfl_schedule_block_for_remap(uint32_t bank, uint32_t block)
701 if (ftl_vfl_check_remap_scheduled(bank, block) == 1)
702 return;
703 panicf("FTL: Scheduling bank %u block %u for remap!", (unsigned)bank, (unsigned)block);
704 if (ftl_vfl_cxt[bank].scheduledstart == ftl_vfl_cxt[bank].spareused)
705 return;
706 ftl_vfl_cxt[bank].remaptable[--ftl_vfl_cxt[bank].scheduledstart] = block;
707 ftl_vfl_commit_cxt(bank);
709 #endif
712 #ifndef FTL_READONLY
713 /* Removes the specified bank and vBlock combination
714 from the remap scheduled list */
715 void ftl_vfl_mark_remap_done(uint32_t bank, uint32_t block)
717 uint32_t i;
718 uint32_t start = ftl_vfl_cxt[bank].scheduledstart;
719 uint32_t lastscheduled = ftl_vfl_cxt[bank].remaptable[start];
720 for (i = 0x333; i > 0 && i > start; i--)
721 if (ftl_vfl_cxt[bank].remaptable[i] == block)
723 if (i != start && i != 0x333)
724 ftl_vfl_cxt[bank].remaptable[i] = lastscheduled;
725 ftl_vfl_cxt[bank].scheduledstart++;
726 return;
729 #endif
732 #ifndef FTL_READONLY
733 /* Logs that there is trouble for the specified vBlock on the specified bank.
734 The vBlock will be scheduled for remap
735 if there is too much trouble with it. */
736 void ftl_vfl_log_trouble(uint32_t bank, uint32_t vblock)
738 uint32_t i;
739 for (i = 0; i < 5; i++)
740 if (ftl_troublelog[i].block == vblock
741 && ftl_troublelog[i].bank == bank)
743 ftl_troublelog[i].errors += 3;
744 if (ftl_troublelog[i].errors > 5)
746 ftl_vfl_schedule_block_for_remap(bank, vblock);
747 ftl_troublelog[i].block = 0xFFFF;
749 return;
751 for (i = 0; i < 5; i++)
752 if (ftl_troublelog[i].block == 0xFFFF)
754 ftl_troublelog[i].block = vblock;
755 ftl_troublelog[i].bank = bank;
756 ftl_troublelog[i].errors = 3;
757 return;
760 #endif
763 #ifndef FTL_READONLY
764 /* Logs a successful erase for the specified vBlock on the specified bank */
765 void ftl_vfl_log_success(uint32_t bank, uint32_t vblock)
767 uint32_t i;
768 for (i = 0; i < 5; i++)
769 if (ftl_troublelog[i].block == vblock
770 && ftl_troublelog[i].bank == bank)
772 if (--ftl_troublelog[i].errors == 0)
773 ftl_troublelog[i].block = 0xFFFF;
774 return;
777 #endif
780 #ifndef FTL_READONLY
781 /* Tries to remap the specified vBlock on the specified bank,
782 not caring about data in there.
783 If it worked, it will return the new pBlock number,
784 if not (no more spare blocks available), it will return zero. */
785 uint32_t ftl_vfl_remap_block(uint32_t bank, uint32_t block)
787 uint32_t i;
788 uint32_t newblock = 0, newidx;
789 panicf("FTL: Remapping bank %u block %u!", (unsigned)bank, (unsigned)block);
790 if (bank >= ftl_banks || block >= ftl_nand_type->blocks) return 0;
791 for (i = 0; i < ftl_vfl_cxt[bank].sparecount; i++)
792 if (ftl_vfl_cxt[bank].remaptable[i] == 0)
794 newblock = ftl_vfl_cxt[bank].firstspare + i;
795 newidx = i;
796 break;
798 if (newblock == 0) return 0;
799 for (i = 0; i < 9; i++)
800 if (nand_block_erase(bank,
801 newblock * ftl_nand_type->pagesperblock) == 0)
802 break;
803 for (i = 0; i < newidx; i++)
804 if (ftl_vfl_cxt[bank].remaptable[i] == block)
805 ftl_vfl_cxt[bank].remaptable[i] = 0xFFFF;
806 ftl_vfl_cxt[bank].remaptable[newidx] = block;
807 ftl_vfl_cxt[bank].spareused++;
808 ftl_vfl_set_good_block(bank, block, 0);
809 return newblock;
811 #endif
814 /* Reads the specified vPage, dealing with all kinds of trouble */
815 uint32_t ftl_vfl_read(uint32_t vpage, void* buffer, void* sparebuffer,
816 uint32_t checkempty, uint32_t remaponfail)
818 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
819 uint32_t syshyperblocks = ftl_nand_type->blocks
820 - ftl_nand_type->userblocks - 0x17;
821 uint32_t abspage = vpage + ppb * syshyperblocks;
822 if (abspage >= ftl_nand_type->blocks * ppb || abspage < ppb)
823 panicf("FTL: Trying to read out-of-bounds vPage %u", (unsigned)vpage);
824 //return 4;
826 uint32_t bank = abspage % ftl_banks;
827 uint32_t block = abspage / (ftl_nand_type->pagesperblock * ftl_banks);
828 uint32_t page = (abspage / ftl_banks) % ftl_nand_type->pagesperblock;
829 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
830 uint32_t physpage = physblock * ftl_nand_type->pagesperblock + page;
832 uint32_t ret = nand_read_page(bank, physpage, buffer,
833 sparebuffer, 1, checkempty);
835 if ((ret & 0x11D) != 0 && (ret & 2) == 0)
837 nand_reset(bank);
838 ret = nand_read_page(bank, physpage, buffer,
839 sparebuffer, 1, checkempty);
840 #ifdef FTL_READONLY
841 (void)remaponfail;
842 #else
843 if (remaponfail == 1 && (ret & 0x11D) != 0 && (ret & 2) == 0)
844 ftl_vfl_schedule_block_for_remap(bank, block);
845 #endif
846 return ret;
849 return ret;
853 /* Multi-bank version of ftl_vfl_read, will read ftl_banks pages in parallel */
854 uint32_t ftl_vfl_read_fast(uint32_t vpage, void* buffer, void* sparebuffer,
855 uint32_t checkempty, uint32_t remaponfail)
857 uint32_t i, rc = 0;
858 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
859 uint32_t syshyperblocks = ftl_nand_type->blocks
860 - ftl_nand_type->userblocks - 0x17;
861 uint32_t abspage = vpage + ppb * syshyperblocks;
862 if (abspage + ftl_banks - 1 >= ftl_nand_type->blocks * ppb || abspage < ppb)
863 panicf("FTL: Trying to read out-of-bounds vPage %u", (unsigned)vpage);
864 //return 4;
866 uint32_t bank = abspage % ftl_banks;
867 uint32_t block = abspage / (ftl_nand_type->pagesperblock * ftl_banks);
868 uint32_t page = (abspage / ftl_banks) % ftl_nand_type->pagesperblock;
869 uint32_t remapped = 0;
870 for (i = 0; i < ftl_banks; i++)
871 if (ftl_vfl_get_physical_block(i, block) != block)
872 remapped = 1;
873 if (bank || remapped)
875 for (i = 0; i < ftl_banks; i++)
877 void* databuf = (void*)0;
878 void* sparebuf = (void*)0;
879 if (buffer) databuf = (void*)((uint32_t)buffer + 0x800 * i);
880 if (sparebuffer) sparebuf = (void*)((uint32_t)sparebuffer + 0x40 * i);
881 uint32_t ret = ftl_vfl_read(vpage + i, databuf, sparebuf, checkempty, remaponfail);
882 if (ret & 1) rc |= 1 << (i << 2);
883 if (ret & 2) rc |= 2 << (i << 2);
884 if (ret & 0x10) rc |= 4 << (i << 2);
885 if (ret & 0x100) rc |= 8 << (i << 2);
887 return rc;
889 uint32_t physpage = block * ftl_nand_type->pagesperblock + page;
891 rc = nand_read_page_fast(physpage, buffer, sparebuffer, 1, checkempty);
892 if (!(rc & 0xdddd)) return rc;
894 for (i = 0; i < ftl_banks; i++)
896 if ((rc >> (i << 2)) & 0x2) continue;
897 if ((rc >> (i << 2)) & 0xd)
899 rc &= ~(0xf << (i << 2));
900 nand_reset(i);
901 uint32_t ret = nand_read_page(i, physpage,
902 (void*)((uint32_t)buffer + 0x800 * i),
903 (void*)((uint32_t)sparebuffer + 0x40 * i),
904 1, checkempty);
905 #ifdef FTL_READONLY
906 (void)remaponfail;
907 #else
908 if (remaponfail == 1 && (ret & 0x11D) != 0 && (ret & 2) == 0)
909 ftl_vfl_schedule_block_for_remap(i, block);
910 #endif
911 if (ret & 1) rc |= 1 << (i << 2);
912 if (ret & 2) rc |= 2 << (i << 2);
913 if (ret & 0x10) rc |= 4 << (i << 2);
914 if (ret & 0x100) rc |= 8 << (i << 2);
918 return rc;
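/* The combined return value above packs one status nibble per bank: bits
   i*4 .. i*4+3 of rc mirror bits 0x1, 0x2, 0x10 and 0x100 of bank i's
   single-page read status. So, for example, rc & (2 << (2 << 2)) being set
   means that bank 2 returned an empty page. This description is inferred
   from the translation code above, not from a documented interface. */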
922 #ifndef FTL_READONLY
923 /* Writes count vPages starting at the specified one, dealing with all kinds of trouble */
924 uint32_t ftl_vfl_write(uint32_t vpage, uint32_t count,
925 void* buffer, void* sparebuffer)
927 uint32_t i, j;
928 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
929 uint32_t syshyperblocks = ftl_nand_type->blocks
930 - ftl_nand_type->userblocks - 0x17;
931 uint32_t abspage = vpage + ppb * syshyperblocks;
932 if (abspage + count > ftl_nand_type->blocks * ppb || abspage < ppb)
933 panicf("FTL: Trying to write out-of-bounds vPage %u",
934 (unsigned)vpage);
935 //return 4;
937 uint32_t bank[5];
938 uint32_t block[5];
939 uint32_t physpage[5];
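/* These small arrays behave like a shift register (see the loop below):
   the bank/block/page of each write that has been started is shifted up by
   one slot per iteration, so the write issued ftl_banks pages earlier can
   be collected with nand_write_page_collect() while new writes are being
   started on the other banks. This is what lets the per-bank page writes
   overlap. */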
941 for (i = 0; i < count; i++, abspage++)
943 for (j = ftl_banks; j > 0; j--)
945 bank[j] = bank[j - 1];
946 block[j] = block[j - 1];
947 physpage[j] = physpage[j - 1];
949 bank[0] = abspage % ftl_banks;
950 block[0] = abspage / (ftl_nand_type->pagesperblock * ftl_banks);
951 uint32_t page = (abspage / ftl_banks) % ftl_nand_type->pagesperblock;
952 uint32_t physblock = ftl_vfl_get_physical_block(bank[0], block[0]);
953 physpage[0] = physblock * ftl_nand_type->pagesperblock + page;
955 if (i >= ftl_banks)
956 if (nand_write_page_collect(bank[ftl_banks]))
957 if (nand_read_page(bank[ftl_banks], physpage[ftl_banks],
958 ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F)
960 panicf("FTL: write error (2) on vPage %u, bank %u, pPage %u",
961 (unsigned)(vpage + i - ftl_banks),
962 (unsigned)bank[ftl_banks],
963 (unsigned)physpage[ftl_banks]);
964 ftl_vfl_log_trouble(bank[ftl_banks], block[ftl_banks]);
966 if (nand_write_page_start(bank[0], physpage[0],
967 (void*)((uint32_t)buffer + 0x800 * i),
968 (void*)((uint32_t)sparebuffer + 0x40 * i), 1))
969 if (nand_read_page(bank[0], physpage[0], ftl_buffer,
970 &ftl_sparebuffer[0], 1, 1) & 0x11F)
972 panicf("FTL: write error (1) on vPage %u, bank %u, pPage %u",
973 (unsigned)(vpage + i), (unsigned)bank[0], (unsigned)physpage[0]);
974 ftl_vfl_log_trouble(bank[0], block[0]);
978 for (i = count < ftl_banks ? count : ftl_banks; i > 0; i--)
979 if (nand_write_page_collect(bank[i - 1]))
980 if (nand_read_page(bank[i - 1], physpage[i - 1],
981 ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F)
983 panicf("FTL: write error (2) on vPage %u, bank %u, pPage %u",
984 (unsigned)(vpage + count - i),
985 (unsigned)bank[i - 1], (unsigned)physpage[i - 1]);
986 ftl_vfl_log_trouble(bank[i - 1], block[i - 1]);
989 return 0;
991 #endif
994 /* Mounts the VFL on all banks */
995 uint32_t ftl_vfl_open(void)
997 uint32_t i, j, k;
998 uint32_t minusn, vflcxtidx, last;
999 FTLVFLCxtType* cxt;
1000 uint16_t vflcxtblock[4];
1001 #ifndef FTL_READONLY
1002 ftl_vfl_usn = 0;
1003 #else
1004 /* Temporary BBT buffer if we're readonly,
1005 as we won't need it again after mounting */
1006 uint8_t bbt[0x410];
1007 #endif
1009 uint32_t syshyperblocks = ftl_nand_type->blocks
1010 - ftl_nand_type->userblocks - 0x18;
1012 for (i = 0; i < ftl_banks; i++)
1013 #ifndef FTL_READONLY
1014 if (ftl_load_bbt(i, ftl_bbt[i]) == 0)
1015 #else
1016 if (ftl_load_bbt(i, bbt) == 0)
1017 #endif
1019 for (j = 1; j <= syshyperblocks; j++)
1020 #ifndef FTL_READONLY
1021 if (ftl_is_good_block(ftl_bbt[i], j) != 0)
1022 #else
1023 if (ftl_is_good_block(bbt, j) != 0)
1024 #endif
1025 if (ftl_vfl_read_page(i, j, 0, ftl_buffer,
1026 &ftl_sparebuffer[0]) == 0)
1028 struct ftl_vfl_cxt_type* cxt;
1029 cxt = (struct ftl_vfl_cxt_type*)ftl_buffer;
1030 memcpy(vflcxtblock, &cxt->vflcxtblocks, 8);
1031 minusn = 0xFFFFFFFF;
1032 vflcxtidx = 4;
1033 for (k = 0; k < 4; k++)
1034 if (vflcxtblock[k] != 0xFFFF)
1035 if (ftl_vfl_read_page(i, vflcxtblock[k], 0,
1036 ftl_buffer,
1037 &ftl_sparebuffer[0]) == 0)
1038 if (ftl_sparebuffer[0].meta.usn > 0
1039 && ftl_sparebuffer[0].meta.usn <= minusn)
1041 minusn = ftl_sparebuffer[0].meta.usn;
1042 vflcxtidx = k;
1044 if (vflcxtidx == 4) //return 1;
1045 panicf("FTL: No VFL CXT block found on bank %u!",
1046 (unsigned)i);
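/* Note: the most recent VFL context copy is the one with the *smallest*
   spare-area usn, because that field is written from updatecount, which
   counts down on every commit (see ftl_vfl_store_cxt). That is why the
   selection loop above searches for the minimum rather than the maximum. */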
1047 last = 0;
1048 uint32_t max = ftl_nand_type->pagesperblock;
1049 for (k = 8; k < max; k += 8)
1051 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
1052 k, ftl_buffer,
1053 &ftl_sparebuffer[0]) != 0)
1054 break;
1055 last = k;
1057 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
1058 last, ftl_buffer,
1059 &ftl_sparebuffer[0]) != 0)
1060 panicf("FTL: Re-reading VFL CXT block "
1061 "on bank %u failed!?", (unsigned)i);
1062 //return 1;
1063 memcpy(&ftl_vfl_cxt[i], ftl_buffer, 0x800);
1064 if (ftl_vfl_verify_checksum(i) != 0) return 1;
1065 #ifndef FTL_READONLY
1066 if (ftl_vfl_usn < ftl_vfl_cxt[i].usn)
1067 ftl_vfl_usn = ftl_vfl_cxt[i].usn;
1068 #endif
1069 break;
1072 else //return 1;
1073 panicf("FTL: Couldn't load bank %u lowlevel BBT!", (unsigned)i);
1074 cxt = ftl_vfl_get_newest_cxt();
1075 for (i = 0; i < ftl_banks; i++)
1076 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, cxt->ftlctrlblocks, 6);
1077 return 0;
1081 /* Mounts the actual FTL */
1082 uint32_t ftl_open(void)
1084 uint32_t i;
1085 uint32_t ret;
1086 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1087 struct ftl_vfl_cxt_type* cxt = ftl_vfl_get_newest_cxt();
1089 uint32_t ftlcxtblock = 0xffffffff;
1090 uint32_t minusn = 0xffffffff;
1091 for (i = 0; i < 3; i++)
1093 ret = ftl_vfl_read(ppb * cxt->ftlctrlblocks[i],
1094 ftl_buffer, &ftl_sparebuffer[0], 1, 0);
1095 if ((ret &= 0x11F) != 0) continue;
1096 if (ftl_sparebuffer[0].meta.type - 0x43 > 4) continue;
1097 if (ftlcxtblock != 0xffffffff && ftl_sparebuffer[0].meta.usn >= minusn)
1098 continue;
1099 minusn = ftl_sparebuffer[0].meta.usn;
1100 ftlcxtblock = cxt->ftlctrlblocks[i];
1103 if (ftlcxtblock == 0xffffffff) //return 1;
1104 panicf("FTL: Couldn't find readable FTL CXT block!");
1106 uint32_t ftlcxtfound = 0;
1107 for (i = ftl_nand_type->pagesperblock * ftl_banks - 1; i > 0; i--)
1109 ret = ftl_vfl_read(ppb * ftlcxtblock + i,
1110 ftl_buffer, &ftl_sparebuffer[0], 1, 0);
1111 if ((ret & 0x11F) != 0) continue;
1112 else if (ftl_sparebuffer[0].meta.type == 0x43)
1114 memcpy(&ftl_cxt, ftl_buffer, 0x28C);
1115 ftlcxtfound = 1;
1116 break;
1118 else
1120 /* This will trip if there was an unclean unmount before. */
1121 #ifndef FTL_FORCEMOUNT
1122 panicf("FTL: Unclean shutdown before!");
1123 break;
1124 #endif
1128 if (ftlcxtfound == 0) //return 1;
1129 panicf("FTL: Couldn't find FTL CXT page!");
1131 uint32_t pagestoread = ftl_nand_type->userblocks >> 10;
1132 if ((ftl_nand_type->userblocks & 0x1FF) != 0) pagestoread++;
1134 for (i = 0; i < pagestoread; i++)
1136 if ((ftl_vfl_read(ftl_cxt.ftl_map_pages[i],
1137 ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
1138 panicf("FTL: Failed to read block map page %u", (unsigned)i);
1139 //return 1;
1141 uint32_t toread = 2048;
1142 if (toread > (ftl_nand_type->userblocks << 1) - (i << 11))
1143 toread = (ftl_nand_type->userblocks << 1) - (i << 11);
1145 memcpy(&ftl_map[i << 10], ftl_buffer, toread);
1148 #ifndef FTL_READONLY
1149 pagestoread = (ftl_nand_type->userblocks + 23) >> 10;
1150 if (((ftl_nand_type->userblocks + 23) & 0x1FF) != 0) pagestoread++;
1152 for (i = 0; i < pagestoread; i++)
1154 if ((ftl_vfl_read(ftl_cxt.ftl_erasectr_pages[i],
1155 ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
1156 panicf("FTL: Failed to read erase counter page %u", (unsigned)i);
1157 //return 1;
1159 uint32_t toread = 2048;
1160 if (toread > ((ftl_nand_type->userblocks + 23) << 1) - (i << 11))
1161 toread = ((ftl_nand_type->userblocks + 23) << 1) - (i << 11);
1163 memcpy(&ftl_erasectr[i << 10], ftl_buffer, toread);
1166 for (i = 0; i < 0x11; i++)
1168 ftl_log[i].scatteredvblock = 0xFFFF;
1169 ftl_log[i].logicalvblock = 0xFFFF;
1170 ftl_log[i].pageoffsets = ftl_offsets[i];
1173 memset(ftl_troublelog, 0xFF, 20);
1174 memset(ftl_erasectr_dirt, 0, 8);
1175 #endif
1177 return 0;
1181 #ifndef FTL_READONLY
1182 /* Returns a pointer to the ftl_log entry for the specified vBlock,
1183 or null, if there is none */
1184 struct ftl_log_type* ftl_get_log_entry(uint32_t block)
1186 uint32_t i;
1187 for (i = 0; i < 0x11; i++)
1189 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1190 if (ftl_log[i].logicalvblock == block) return &ftl_log[i];
1192 return (struct ftl_log_type*)0;
1194 #endif
1196 /* Exposed function: Read highlevel sectors */
1197 uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer)
1199 uint32_t i, j;
1200 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1201 uint32_t error = 0;
1203 if (sector + count > ftl_nand_type->userblocks * ppb)
1204 return 1;
1206 if (count == 0) return 0;
1208 mutex_lock(&ftl_mtx);
1210 for (i = 0; i < count; i++)
1212 uint32_t block = (sector + i) / ppb;
1213 uint32_t page = (sector + i) % ppb;
1215 uint32_t abspage = ftl_map[block] * ppb + page;
1216 #ifndef FTL_READONLY
1217 struct ftl_log_type* logentry = ftl_get_log_entry(block);
1218 if (logentry != (struct ftl_log_type*)0)
1219 if (logentry->scatteredvblock != 0xFFFF
1220 && logentry->pageoffsets[page] != 0xFFFF)
1221 abspage = logentry->scatteredvblock * ppb
1222 + logentry->pageoffsets[page];
1223 #endif
1225 #ifndef FTL_READONLY
1226 if (count >= i + ftl_banks && !(page & (ftl_banks - 1))
1227 && logentry == (struct ftl_log_type*)0)
1228 #else
1229 if (count >= i + ftl_banks && !(page & (ftl_banks - 1)))
1230 #endif
1232 uint32_t ret = ftl_vfl_read_fast(abspage, &((uint8_t*)buffer)[i << 11],
1233 &ftl_sparebuffer[0], 1, 1);
1234 for (j = 0; j < ftl_banks; j++)
1235 if (ret & (2 << (j << 2)))
1236 memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
1237 else if ((ret & (0xd << (j << 2))) || ftl_sparebuffer[j].user.eccmark != 0xFF)
1239 error = 1;
1240 memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
1242 i += ftl_banks - 1;
1244 else
1246 uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
1247 &ftl_sparebuffer[0], 1, 1);
1248 if (ret & 2) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
1249 else if ((ret & 0x11D) != 0 || ftl_sparebuffer[0].user.eccmark != 0xFF)
1251 error = 1;
1252 memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
1257 mutex_unlock(&ftl_mtx);
1259 return error;
1263 #ifndef FTL_READONLY
1264 /* Performs a vBlock erase, dealing with hardware,
1265 remapping and all kinds of trouble */
1266 uint32_t ftl_erase_block_internal(uint32_t block)
1268 uint32_t i, j;
1269 block = block + ftl_nand_type->blocks
1270 - ftl_nand_type->userblocks - 0x17;
1271 if (block == 0 || block >= ftl_nand_type->blocks) return 1;
1272 for (i = 0; i < ftl_banks; i++)
1274 if (ftl_vfl_check_remap_scheduled(i, block) == 1)
1276 ftl_vfl_remap_block(i, block);
1277 ftl_vfl_mark_remap_done(i, block);
1279 ftl_vfl_log_success(i, block);
1280 uint32_t pblock = ftl_vfl_get_physical_block(i, block);
1281 uint32_t rc;
1282 for (j = 0; j < 3; j++)
1284 rc = nand_block_erase(i, pblock * ftl_nand_type->pagesperblock);
1285 if (rc == 0) break;
1287 if (rc != 0)
1289 panicf("FTL: Block erase failed on bank %u block %u",
1290 (unsigned)i, (unsigned)block);
1291 if (pblock != block)
1293 uint32_t spareindex = pblock - ftl_vfl_cxt[i].firstspare;
1294 ftl_vfl_cxt[i].remaptable[spareindex] = 0xFFFF;
1296 ftl_vfl_cxt[i].field_18++;
1297 if (ftl_vfl_remap_block(i, block) == 0) return 1;
1298 if (ftl_vfl_commit_cxt(i) != 0) return 1;
1299 memset(&ftl_sparebuffer, 0, 0x40);
1300 nand_write_page(i, pblock, &ftl_vfl_cxt[0], &ftl_sparebuffer, 1);
1303 return 0;
1305 #endif
1308 #ifndef FTL_READONLY
1309 /* Highlevel vBlock erase, that increments the erase counter for the block */
1310 uint32_t ftl_erase_block(uint32_t block)
1312 ftl_erasectr[block]++;
1313 if (ftl_erasectr_dirt[block >> 10] == 100) ftl_cxt.erasedirty = 1;
1314 else ftl_erasectr_dirt[block >> 10]++;
1315 return ftl_erase_block_internal(block);
1317 #endif
1320 #ifndef FTL_READONLY
1321 /* Allocates a block from the pool,
1322 returning its vBlock number, or 0xFFFFFFFF on error */
1323 uint32_t ftl_allocate_pool_block(void)
1325 uint32_t i;
1326 uint32_t erasectr = 0xFFFFFFFF, bestidx = 0xFFFFFFFF, block;
1327 for (i = 0; i < ftl_cxt.freecount; i++)
1329 uint32_t idx = ftl_cxt.nextfreeidx + i;
1330 if (idx >= 0x14) idx -= 0x14;
1331 if (!ftl_cxt.blockpool[idx]) continue;
1332 if (ftl_erasectr[ftl_cxt.blockpool[idx]] < erasectr)
1334 erasectr = ftl_erasectr[ftl_cxt.blockpool[idx]];
1335 bestidx = idx;
1338 if (bestidx == 0xFFFFFFFF) panicf("Out of pool blocks!");
1339 block = ftl_cxt.blockpool[bestidx];
1340 if (bestidx != ftl_cxt.nextfreeidx)
1342 ftl_cxt.blockpool[bestidx] = ftl_cxt.blockpool[ftl_cxt.nextfreeidx];
1343 ftl_cxt.blockpool[ftl_cxt.nextfreeidx] = block;
1345 if (block > (uint32_t)ftl_nand_type->userblocks + 0x17)
1346 panicf("FTL: Bad block number in pool: %u", (unsigned)block);
1347 if (ftl_erase_block(block) != 0) return 0xFFFFFFFF;
1348 if (++ftl_cxt.nextfreeidx == 0x14) ftl_cxt.nextfreeidx = 0;
1349 ftl_cxt.freecount--;
1350 return block;
1352 #endif
1355 #ifndef FTL_READONLY
1356 /* Releases a vBlock back into the pool */
1357 void ftl_release_pool_block(uint32_t block)
1359 if (block >= (uint32_t)ftl_nand_type->userblocks + 0x17)
1360 panicf("FTL: Tried to release block %u", (unsigned)block);
1361 uint32_t idx = ftl_cxt.nextfreeidx + ftl_cxt.freecount++;
1362 if (idx >= 0x14) idx -= 0x14;
1363 ftl_cxt.blockpool[idx] = block;
1365 #endif
1368 #ifndef FTL_READONLY
1369 /* Commits the location of the FTL context blocks
1370 to a semi-randomly chosen VFL context */
1371 uint32_t ftl_store_ctrl_block_list(void)
1373 uint32_t i;
1374 for (i = 0; i < ftl_banks; i++)
1375 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, ftl_cxt.ftlctrlblocks, 6);
1376 return ftl_vfl_commit_cxt(ftl_vfl_usn % ftl_banks);
1378 #endif
1381 #ifndef FTL_READONLY
1382 /* Saves the n-th erase counter page to the flash,
1383 because it is too dirty or needs to be moved. */
1384 uint32_t ftl_save_erasectr_page(uint32_t index)
1386 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
1387 ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
1388 ftl_sparebuffer[0].meta.idx = index;
1389 ftl_sparebuffer[0].meta.type = 0x46;
1390 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_erasectr[index << 10],
1391 &ftl_sparebuffer[0]) != 0)
1392 return 1;
1393 if ((ftl_vfl_read(ftl_cxt.ftlctrlpage, ftl_buffer,
1394 &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
1395 return 1;
1396 if (memcmp(ftl_buffer, &ftl_erasectr[index << 10], 0x800) != 0) return 1;
1397 if (ftl_sparebuffer[0].meta.type != 0x46) return 1;
1398 if (ftl_sparebuffer[0].meta.idx != index) return 1;
1399 if (ftl_sparebuffer[0].meta.usn != ftl_cxt.usn) return 1;
1400 ftl_cxt.ftl_erasectr_pages[index] = ftl_cxt.ftlctrlpage;
1401 ftl_erasectr_dirt[index] = 0;
1402 return 0;
1404 #endif
1407 #ifndef FTL_READONLY
1408 /* Increments ftl_cxt.ftlctrlpage to the next available FTL context page,
1409 allocating a new context block if necessary. */
1410 uint32_t ftl_next_ctrl_pool_page(void)
1412 uint32_t i;
1413 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1414 if (++ftl_cxt.ftlctrlpage % ppb != 0) return 0;
1415 for (i = 0; i < 3; i++)
1416 if ((ftl_cxt.ftlctrlblocks[i] + 1) * ppb == ftl_cxt.ftlctrlpage)
1417 break;
1418 i = (i + 1) % 3;
1419 uint32_t oldblock = ftl_cxt.ftlctrlblocks[i];
1420 uint32_t newblock = ftl_allocate_pool_block();
1421 if (newblock == 0xFFFFFFFF) return 1;
1422 ftl_cxt.ftlctrlblocks[i] = newblock;
1423 ftl_cxt.ftlctrlpage = newblock * ppb;
1424 uint32_t pagestoread = (ftl_nand_type->userblocks + 23) >> 10;
1425 if (((ftl_nand_type->userblocks + 23) & 0x1FF) != 0) pagestoread++;
1426 for (i = 0; i < pagestoread; i++)
1427 if (oldblock * ppb <= ftl_cxt.ftl_erasectr_pages[i]
1428 && (oldblock + 1) * ppb > ftl_cxt.ftl_erasectr_pages[i])
1430 ftl_cxt.usn--;
1431 if (ftl_save_erasectr_page(i) != 0)
1433 ftl_cxt.ftlctrlblocks[i] = oldblock;
1434 ftl_cxt.ftlctrlpage = oldblock * (ppb + 1) - 1;
1435 ftl_release_pool_block(newblock);
1436 return 1;
1438 ftl_cxt.ftlctrlpage++;
1440 ftl_release_pool_block(oldblock);
1441 return ftl_store_ctrl_block_list();
1443 #endif
1446 #ifndef FTL_READONLY
1447 /* Copies a vPage from one location to another */
1448 uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
1449 uint32_t lpn, uint32_t type)
1451 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1452 uint32_t rc = ftl_vfl_read(source, ftl_copybuffer[0],
1453 &ftl_copyspare[0], 1, 1) & 0x11F;
1454 memset(&ftl_copyspare[0], 0xFF, 0x40);
1455 ftl_copyspare[0].user.lpn = lpn;
1456 ftl_copyspare[0].user.usn = ++ftl_cxt.nextblockusn;
1457 ftl_copyspare[0].user.type = 0x40;
1458 if ((rc & 2) != 0) memset(ftl_copybuffer[0], 0, 0x800);
1459 else if (rc != 0) ftl_copyspare[0].user.eccmark = 0x55;
1460 if (type == 1 && destination % ppb == ppb - 1)
1461 ftl_copyspare[0].user.type = 0x41;
1462 return ftl_vfl_write(destination, 1, ftl_copybuffer[0], &ftl_copyspare[0]);
1464 #endif
1467 #ifndef FTL_READONLY
1468 /* Copies a logical block (lBlock) to a vBlock */
1469 uint32_t ftl_copy_block(uint32_t source, uint32_t destination)
1471 uint32_t i, j;
1472 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1473 uint32_t error = 0;
1474 ftl_cxt.nextblockusn++;
1475 for (i = 0; i < ppb; i += FTL_COPYBUF_SIZE)
1477 uint32_t rc = ftl_read(source * ppb + i,
1478 FTL_COPYBUF_SIZE, ftl_copybuffer[0]);
1479 memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
1480 for (j = 0; j < FTL_COPYBUF_SIZE; j++)
1482 ftl_copyspare[j].user.lpn = source * ppb + i + j;
1483 ftl_copyspare[j].user.usn = ftl_cxt.nextblockusn;
1484 ftl_copyspare[j].user.type = 0x40;
1485 if (rc)
1487 if (ftl_read(source * ppb + i + j, 1, ftl_copybuffer[j]))
1488 ftl_copyspare[j].user.eccmark = 0x55;
1490 if (i + j == ppb - 1) ftl_copyspare[j].user.type = 0x41;
1492 if (ftl_vfl_write(destination * ppb + i, FTL_COPYBUF_SIZE,
1493 ftl_copybuffer[0], &ftl_copyspare[0]))
1495 error = 1;
1496 break;
1499 if (error != 0)
1501 ftl_erase_block(destination);
1502 return 1;
1504 return 0;
1506 #endif
1509 #ifndef FTL_READONLY
1510 /* Clears ftl_log.issequential, if something violating that is written. */
1511 void ftl_check_still_sequential(struct ftl_log_type* entry, uint32_t page)
1513 if (entry->pagesused != entry->pagescurrent
1514 || entry->pageoffsets[page] != page)
1515 entry->issequential = 0;
1517 #endif
1520 #ifndef FTL_READONLY
1521 /* Copies all pages that are currently used from the scattered page block in
1522 use by the supplied ftl_log entry to a newly-allocated one, and releases
1523 the old one.
1524 In other words: It kicks the pages containing old garbage out of it to make
1525 space again. This is usually done when a scattered page block is being
1526 removed because it is full, but less than half of the pages in there are
1527 still in use and the rest is just filled with old crap. */
1528 uint32_t ftl_compact_scattered(struct ftl_log_type* entry)
1530 uint32_t i, j;
1531 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1532 uint32_t error;
1533 struct ftl_log_type backup;
1534 if (entry->pagescurrent == 0)
1536 ftl_release_pool_block(entry->scatteredvblock);
1537 entry->scatteredvblock = 0xFFFF;
1538 return 0;
1540 backup = *entry;
1541 memcpy(ftl_offsets_backup, entry->pageoffsets, 0x400);
1542 for (i = 0; i < 4; i++)
1544 uint32_t block = ftl_allocate_pool_block();
1545 if (block == 0xFFFFFFFF) return 1;
1546 entry->pagesused = 0;
1547 entry->pagescurrent = 0;
1548 entry->issequential = 1;
1549 entry->scatteredvblock = block;
1550 error = 0;
1551 for (j = 0; j < ppb; j++)
1552 if (entry->pageoffsets[j] != 0xFFFF)
1554 uint32_t lpn = entry->logicalvblock * ppb + j;
1555 uint32_t newpage = block * ppb + entry->pagesused;
1556 uint32_t oldpage = backup.scatteredvblock * ppb
1557 + entry->pageoffsets[j];
1558 if (ftl_copy_page(oldpage, newpage, lpn,
1559 entry->issequential) != 0)
1561 error = 1;
1562 break;
1564 entry->pageoffsets[j] = entry->pagesused++;
1565 entry->pagescurrent++;
1566 ftl_check_still_sequential(entry, j);
1568 if (backup.pagescurrent != entry->pagescurrent) error = 1;
1569 if (error == 0)
1571 ftl_release_pool_block(backup.scatteredvblock);
1572 break;
1574 *entry = backup;
1575 memcpy(entry->pageoffsets, ftl_offsets_backup, 0x400);
1577 return error;
1579 #endif
1582 #ifndef FTL_READONLY
1583 /* Commits an ftl_log entry to proper blocks, no matter what's in there. */
1584 uint32_t ftl_commit_scattered(struct ftl_log_type* entry)
1586 uint32_t i;
1587 uint32_t error;
1588 uint32_t block;
1589 for (i = 0; i < 4; i++)
1591 block = ftl_allocate_pool_block();
1592 if (block == 0xFFFFFFFF) return 1;
1593 error = ftl_copy_block(entry->logicalvblock, block);
1594 if (error == 0) break;
1595 ftl_release_pool_block(block);
1597 if (error != 0) return 1;
1598 ftl_release_pool_block(entry->scatteredvblock);
1599 entry->scatteredvblock = 0xFFFF;
1600 ftl_release_pool_block(ftl_map[entry->logicalvblock]);
1601 ftl_map[entry->logicalvblock] = block;
1602 return 0;
1604 #endif
1607 #ifndef FTL_READONLY
1608 /* Fills the rest of a scattered page block that was actually written
1609 sequentially until now, in order to be able to save a block erase by
1610 committing it without needing to copy it again.
1611 If this fails for whatever reason, it will be committed the usual way. */
1612 uint32_t ftl_commit_sequential(struct ftl_log_type* entry)
1614 uint32_t i;
1615 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1617 if (entry->issequential != 1
1618 || entry->pagescurrent != entry->pagesused)
1619 return 1;
1621 for (; entry->pagesused < ppb; )
1623 uint32_t lpn = entry->logicalvblock * ppb + entry->pagesused;
1624 uint32_t newpage = entry->scatteredvblock * ppb
1625 + entry->pagesused;
1626 uint32_t count = FTL_COPYBUF_SIZE < ppb - entry->pagesused
1627 ? FTL_COPYBUF_SIZE : ppb - entry->pagesused;
1628 for (i = 0; i < count; i++)
1629 if (entry->pageoffsets[entry->pagesused + i] != 0xFFFF)
1630 return ftl_commit_scattered(entry);
1631 uint32_t rc = ftl_read(lpn, count, ftl_copybuffer[0]);
1632 memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
1633 for (i = 0; i < count; i++)
1635 ftl_copyspare[i].user.lpn = lpn + i;
1636 ftl_copyspare[i].user.usn = ++ftl_cxt.nextblockusn;
1637 ftl_copyspare[i].user.type = 0x40;
1638 if (rc) ftl_copyspare[i].user.eccmark = 0x55;
1639 if (entry->pagesused + i == ppb - 1)
1640 ftl_copyspare[i].user.type = 0x41;
1642 if (ftl_vfl_write(newpage, count, ftl_copybuffer[0], &ftl_copyspare[0]))
1643 return ftl_commit_scattered(entry);
1644 entry->pagesused += count;
1646 ftl_release_pool_block(ftl_map[entry->logicalvblock]);
1647 ftl_map[entry->logicalvblock] = entry->scatteredvblock;
1648 entry->scatteredvblock = 0xFFFF;
1649 return 0;
1651 #endif
1654 #ifndef FTL_READONLY
1655 /* If a log entry is supplied, its scattered page block will be removed in
1656 whatever way seems most appropriate. Else, the oldest scattered page block
1657 will be freed by committing it. */
1658 uint32_t ftl_remove_scattered_block(struct ftl_log_type* entry)
1660 uint32_t i;
1661 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1662 uint32_t age = 0xFFFFFFFF, used = 0;
1663 if (entry == (struct ftl_log_type*)0)
1665 for (i = 0; i < 0x11; i++)
1667 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1668 if (ftl_log[i].pagesused == 0 || ftl_log[i].pagescurrent == 0)
1669 return 1;
1670 if (ftl_log[i].usn < age
1671 || (ftl_log[i].usn == age && ftl_log[i].pagescurrent > used))
1673 age = ftl_log[i].usn;
1674 used = ftl_log[i].pagescurrent;
1675 entry = &ftl_log[i];
1678 if (entry == (struct ftl_log_type*)0) return 1;
1680 else if (entry->pagescurrent < ppb / 2)
1682 ftl_cxt.swapcounter++;
1683 return ftl_compact_scattered(entry);
1685 ftl_cxt.swapcounter++;
1686 if (entry->issequential == 1) return ftl_commit_sequential(entry);
1687 else return ftl_commit_scattered(entry);
1689 #endif
1692 #ifndef FTL_READONLY
1693 /* Initialize a log entry to the values for an empty scattered page block */
1694 void ftl_init_log_entry(struct ftl_log_type* entry)
1696 entry->issequential = 1;
1697 entry->pagescurrent = 0;
1698 entry->pagesused = 0;
1699 memset(entry->pageoffsets, 0xFF, 0x400);
1701 #endif
1704 #ifndef FTL_READONLY
1705 /* Allocates a log entry for the specified vBlock,
1706 first making space, if necessary. */
1707 struct ftl_log_type* ftl_allocate_log_entry(uint32_t block)
1709 uint32_t i;
1710 struct ftl_log_type* entry = ftl_get_log_entry(block);
1711 /* Only touch the entry if one actually exists (avoids a null dereference) */
1712 if (entry != (struct ftl_log_type*)0) { entry->usn = ftl_cxt.nextblockusn - 1; return entry; }
1714 for (i = 0; i < 0x11; i++)
1716 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1717 if (ftl_log[i].pagesused == 0)
1719 entry = &ftl_log[i];
1720 break;
1724 if (entry == (struct ftl_log_type*)0)
1726 if (ftl_cxt.freecount < 3) panicf("FTL: Detected a pool block leak!");
1727 else if (ftl_cxt.freecount == 3)
1728 if (ftl_remove_scattered_block((struct ftl_log_type*)0) != 0)
1729 return (struct ftl_log_type*)0;
1730 entry = ftl_log;
1731 while (entry->scatteredvblock != 0xFFFF) entry = &entry[1];
1732 entry->scatteredvblock = ftl_allocate_pool_block();
1733 if (entry->scatteredvblock == 0xFFFF)
1734 return (struct ftl_log_type*)0;
1737 ftl_init_log_entry(entry);
1738 entry->logicalvblock = block;
1739 entry->usn = ftl_cxt.nextblockusn - 1;
1741 return entry;
1743 #endif
1746 #ifndef FTL_READONLY
1747 /* Commits the FTL block map, erase counters, and context to flash */
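/* Rough sketch of the page budget computed below, assuming 2 KB pages
   holding 0x400 16-bit entries each: mappages and ctrpages are the block
   map and erase counter entry counts rounded up to whole pages, plus one
   page for the context itself. If that budget would spill past the end of
   the current control block, ftlctrlpage is pushed to the block boundary
   so that ftl_next_ctrl_pool_page() starts a fresh block first. */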
1748 uint32_t ftl_commit_cxt(void)
1750 uint32_t i;
1751 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1752 uint32_t mappages = (ftl_nand_type->userblocks + 0x3ff) >> 10;
1753 uint32_t ctrpages = (ftl_nand_type->userblocks + 23 + 0x3ff) >> 10;
1754 uint32_t endpage = ftl_cxt.ftlctrlpage + mappages + ctrpages + 1;
1755 if (endpage >= (ftl_cxt.ftlctrlpage / ppb + 1) * ppb)
1756 ftl_cxt.ftlctrlpage |= ppb - 1;
1757 for (i = 0; i < ctrpages; i++)
1759 if (ftl_next_ctrl_pool_page() != 0) return 1;
1760 if (ftl_save_erasectr_page(i) != 0) return 1;
1762 for (i = 0; i < mappages; i++)
1764 if (ftl_next_ctrl_pool_page() != 0) return 1;
1765 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
1766 ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
1767 ftl_sparebuffer[0].meta.idx = i;
1768 ftl_sparebuffer[0].meta.type = 0x44;
1769 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_map[i << 10],
1770 &ftl_sparebuffer[0]) != 0)
1771 return 1;
1772 ftl_cxt.ftl_map_pages[i] = ftl_cxt.ftlctrlpage;
1774 if (ftl_next_ctrl_pool_page() != 0) return 1;
1775 ftl_cxt.clean_flag = 1;
1776 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
1777 ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
1778 ftl_sparebuffer[0].meta.type = 0x43;
1779 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_cxt, &ftl_sparebuffer[0]) != 0)
1780 return 1;
1781 return 0;
1783 #endif
1786 #ifndef FTL_READONLY
1787 /* Swaps the most and least worn block on the flash,
1788 to better distribute wear. It will refuse to do anything
1789 if the wear spread is lower than 5 erases. */
1790 uint32_t ftl_swap_blocks(void)
1792 uint32_t i;
1793 uint32_t min = 0xFFFFFFFF, max = 0, maxidx = 0x14;
1794 uint32_t minidx = 0, minvb = 0, maxvb = 0;
1795 for (i = 0; i < ftl_cxt.freecount; i++)
1797 uint32_t idx = ftl_cxt.nextfreeidx + i;
1798 if (idx >= 0x14) idx -= 0x14;
1799 if (ftl_erasectr[ftl_cxt.blockpool[idx]] > max)
1801 maxidx = idx;
1802 maxvb = ftl_cxt.blockpool[idx];
1803 max = ftl_erasectr[maxidx];
1806 if (maxidx == 0x14) return 0;
1807 for (i = 0; i < ftl_nand_type->userblocks; i++)
1809 if (ftl_erasectr[ftl_map[i]] > max) max = ftl_erasectr[ftl_map[i]];
1810 if (ftl_get_log_entry(i) != (struct ftl_log_type*)0) continue;
1811 if (ftl_erasectr[ftl_map[i]] < min)
1813 minidx = i;
1814 minvb = ftl_map[i];
1815 min = ftl_erasectr[minidx];
1818 if (max - min < 5) return 0;
1819 if (minvb == maxvb) return 0;
1820 if (ftl_erase_block(maxvb) != 0) return 1;
1821 if (ftl_copy_block(minidx, maxvb) != 0) return 1;
1822 ftl_cxt.blockpool[maxidx] = minvb;
1823 ftl_map[minidx] = maxvb;
1824 return 0;
1826 #endif
1829 #ifndef FTL_READONLY
1830 /* Exposed function: Write highlevel sectors */
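/* Before any user data is touched, the code below clears ftl_cxt.clean_flag
   and writes a page of type 0x47 ("FTL is currently mounted") to the
   current control block. If power is lost before the next ftl_sync(),
   ftl_open() will hit that page instead of a type 0x43 context page and
   panic about an unclean shutdown, which is the intended dirty-mount
   detection. */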
1831 uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
1833 uint32_t i, j, k;
1834 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1836 if (sector + count > ftl_nand_type->userblocks * ppb)
1837 return 1;
1839 if (count == 0) return 0;
1841 mutex_lock(&ftl_mtx);
1843 if (ftl_cxt.clean_flag == 1)
1845 for (i = 0; i < 3; i++)
1847 if (ftl_next_ctrl_pool_page() != 0)
1849 mutex_unlock(&ftl_mtx);
1850 return 1;
1852 memset(ftl_buffer, 0xFF, 0x800);
1853 memset(&ftl_sparebuffer[0], 0xFF, 0x40);
1854 ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
1855 ftl_sparebuffer[0].meta.type = 0x47;
1856 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, ftl_buffer,
1857 &ftl_sparebuffer[0]) == 0)
1858 break;
1860 if (i == 3)
1862 mutex_unlock(&ftl_mtx);
1863 return 1;
1865 ftl_cxt.clean_flag = 0;
1868 for (i = 0; i < count; )
1870 uint32_t block = (sector + i) / ppb;
1871 uint32_t page = (sector + i) % ppb;
1873 struct ftl_log_type* logentry = ftl_allocate_log_entry(block);
1874 if (logentry == (struct ftl_log_type*)0)
1876 mutex_unlock(&ftl_mtx);
1877 return 1;
1879 if (page == 0 && count - i >= ppb)
1881 uint32_t vblock = logentry->scatteredvblock;
1882 logentry->scatteredvblock = 0xFFFF;
1883 if (logentry->pagesused != 0)
1885 ftl_release_pool_block(vblock);
1886 vblock = ftl_allocate_pool_block();
1887 if (vblock == 0xFFFFFFFF)
1889 mutex_unlock(&ftl_mtx);
1890 return 1;
1893 ftl_cxt.nextblockusn++;
1894 for (j = 0; j < ppb; j += FTL_WRITESPARE_SIZE)
1896 memset(&ftl_sparebuffer[0], 0xFF, 0x40 * FTL_WRITESPARE_SIZE);
1897 for (k = 0; k < FTL_WRITESPARE_SIZE; k++)
1899 ftl_sparebuffer[k].user.lpn = sector + i + j + k;
1900 ftl_sparebuffer[k].user.usn = ftl_cxt.nextblockusn;
1901 ftl_sparebuffer[k].user.type = 0x40;
1902 if (j == ppb - 1) ftl_sparebuffer[k].user.type = 0x41;
1904 uint32_t rc = ftl_vfl_write(vblock * ppb + j, FTL_WRITESPARE_SIZE,
1905 &((uint8_t*)buffer)[(i + j) << 11],
1906 &ftl_sparebuffer[0]);
1907 if (rc)
1908 for (k = 0; k < ftl_banks; k++)
1909 if (rc & (1 << k))
1911 while (ftl_vfl_write(vblock * ppb + j + k, 1,
1912 &((uint8_t*)buffer)[(i + j + k) << 11],
1913 &ftl_sparebuffer[k]));
1916 ftl_release_pool_block(ftl_map[block]);
1917 ftl_map[block] = vblock;
1918 i += ppb;
1920 else
1922 if (logentry->pagesused == ppb)
1924 ftl_remove_scattered_block(logentry);
1925 logentry = ftl_allocate_log_entry(block);
1926 if (logentry == (struct ftl_log_type*)0)
1928 mutex_unlock(&ftl_mtx);
1929 return 1;
1932 uint32_t cnt = FTL_WRITESPARE_SIZE;
1933 if (cnt > count - i) cnt = count - i;
1934 if (cnt > ppb - logentry->pagesused) cnt = ppb - logentry->pagesused;
1935 if (cnt > ppb - page) cnt = ppb - page;
1936 memset(&ftl_sparebuffer[0], 0xFF, 0x40 * cnt);
1937 for (j = 0; j < cnt; j++)
1939 ftl_sparebuffer[j].user.lpn = sector + i + j;
1940 ftl_sparebuffer[j].user.usn = ++ftl_cxt.nextblockusn;
1941 ftl_sparebuffer[j].user.type = 0x40;
1942 if (logentry->pagesused + j == ppb - 1 && logentry->issequential)
1943 ftl_sparebuffer[j].user.type = 0x41;
1945 uint32_t abspage = logentry->scatteredvblock * ppb
1946 + logentry->pagesused;
1947 logentry->pagesused += cnt;
1948 if (ftl_vfl_write(abspage, cnt, &((uint8_t*)buffer)[i << 11],
1949 &ftl_sparebuffer[0]) == 0)
1951 for (j = 0; j < cnt; j++)
1953 if (logentry->pageoffsets[page + j] == 0xFFFF)
1954 logentry->pagescurrent++;
1955 logentry->pageoffsets[page + j] = logentry->pagesused - cnt + j;
1956 if (logentry->pagesused - cnt + j + 1 != logentry->pagescurrent
1957 || logentry->pageoffsets[page + j] != page + j)
1958 logentry->issequential = 0;
1960 i += cnt;
1962 else panicf("FTL: Write error: %u %u %u!",
1963 (unsigned)sector, (unsigned)count, (unsigned)i);
1965 if (logentry->pagesused == ppb) ftl_remove_scattered_block(logentry);
1967 if (ftl_cxt.swapcounter >= 300)
1969 ftl_cxt.swapcounter -= 20;
1970 for (i = 0; i < 4; i++) if (ftl_swap_blocks() == 0) break;
1972 if (ftl_cxt.erasedirty == 1)
1974 ftl_cxt.erasedirty = 0;
1975 for (i = 0; i < 8; i++)
1976 if (ftl_erasectr_dirt[i] >= 100)
1978 ftl_next_ctrl_pool_page();
1979 ftl_save_erasectr_page(i);
1982 mutex_unlock(&ftl_mtx);
1983 return 0;
1985 #endif
1988 #ifndef FTL_READONLY
1989 /* Exposed function: Performs a sync / unmount,
1990 i.e. commits all scattered page blocks,
1991 distributes wear, and commits the FTL context. */
1992 uint32_t ftl_sync(void)
1994 uint32_t i;
1995 uint32_t rc = 0;
1996 uint32_t ppb = ftl_nand_type->pagesperblock * ftl_banks;
1997 if (ftl_cxt.clean_flag == 1) return 0;
1999 mutex_lock(&ftl_mtx);
2001 if (ftl_cxt.swapcounter >= 20)
2002 for (i = 0; i < 4; i++)
2003 if (ftl_swap_blocks() == 0)
2005 ftl_cxt.swapcounter -= 20;
2006 break;
2008 for (i = 0; i < 0x11; i++)
2010 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
2011 ftl_cxt.nextblockusn++;
2012 if (ftl_log[i].issequential == 1)
2013 rc |= ftl_commit_sequential(&ftl_log[i]);
2014 else rc |= ftl_commit_scattered(&ftl_log[i]);
2016 if (rc != 0)
2018 mutex_unlock(&ftl_mtx);
2019 return 1;
2021 for (i = 0; i < 5; i++)
2022 if (ftl_commit_cxt() == 0)
2024 mutex_unlock(&ftl_mtx);
2025 return 0;
2027 else ftl_cxt.ftlctrlpage |= ppb - 1;
2028 mutex_unlock(&ftl_mtx);
2029 return 1;
2031 #endif
2034 /* Initializes and mounts the FTL.
2035 As long as nothing was written, you won't need to unmount it.
2036 Before shutting down after writing something, call ftl_sync(),
2037 which will just do nothing if everything was already clean. */
2038 uint32_t ftl_init(void)
2040 mutex_init(&ftl_mtx);
2041 uint32_t i;
2042 uint32_t result = 0;
2043 uint32_t foundsignature, founddevinfo, blockwiped, repaired, skip;
2044 if (nand_device_init() != 0) //return 1;
2045 panicf("FTL: Lowlevel NAND driver init failed!");
2046 ftl_banks = 0;
2047 for (i = 0; i < 4; i++)
2048 if (nand_get_device_type(i) != 0) ftl_banks = i + 1;
2049 ftl_nand_type = nand_get_device_type(0);
2050 foundsignature = 0;
2051 blockwiped = 1;
2052 mutex_unlock(&ftl_mtx);
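/* The loop below scans block 0 for the whimory signature. The constant
   0x41303034 corresponds to the ASCII bytes "400A" when read as a
   little-endian word; this reading is an assumption based on the value
   itself, the meaning of the string is not documented here. */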
2053 for (i = 0; i < ftl_nand_type->pagesperblock; i++)
2055 result = nand_read_page(0, i, ftl_buffer, (uint32_t*)0, 1, 1);
2056 if ((result & 0x11F) == 0)
2058 blockwiped = 0;
2059 if (((uint32_t*)ftl_buffer)[0] != 0x41303034) continue;
2060 foundsignature = 1;
2061 break;
2063 else if ((result & 2) != 2) blockwiped = 0;
2066 founddevinfo = ftl_has_devinfo();
2068 repaired = 0;
2069 skip = 0;
2070 if (founddevinfo == 0)
2072 mutex_unlock(&ftl_mtx);
2073 panicf("FTL: No DEVICEINFO found!");
2074 //return 1;
2076 if (foundsignature != 0 && (result & 0x11F) != 0)
2078 mutex_unlock(&ftl_mtx);
2079 panicf("FTL: Problem with the signature!");
2080 //return 1;
2082 if (ftl_vfl_open() == 0)
2083 if (ftl_open() == 0)
2085 mutex_unlock(&ftl_mtx);
2086 return 0;
2089 panicf("FTL: Initialization failed!");
2091 /* Something went terribly wrong. We may want to allow the user to erase
2092 block zero in that condition, to make norboot reinitialize the FTL.
2093 (However there is currently no point in this, as iLoader would already
2094 fail if that were the case.)
2096 nand_block_erase(0, 0);
2100 mutex_unlock(&ftl_mtx);
2101 return 1;