2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Google LLC
5 * Copyright (C) 1995, 1996, 1997 Wolfgang Solfrank
6 * Copyright (c) 1995 Martin Husemann
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/endian.h>
30 #include <sys/queue.h>
31 #include <sys/limits.h>
33 #include <sys/param.h>
46 static int _readfat(struct fat_descriptor
*);
47 static inline struct bootblock
* boot_of_(struct fat_descriptor
*);
48 static inline int fd_of_(struct fat_descriptor
*);
49 static inline bool valid_cl(struct fat_descriptor
*, cl_t
);
53 * Head bitmap for FAT scanning.
55 * FAT32 have up to 2^28 = 256M entries, and FAT16/12 have much less.
56 * For each cluster, we use 1 bit to represent if it's a head cluster
57 * (the first cluster of a cluster chain).
61 * Initially, we set all bits to 1. In readfat(), we traverse the
62 * whole FAT and mark each cluster identified as "next" cluster as
63 * 0. After the scan, we have a bitmap with 1's to indicate the
64 * corresponding cluster was a "head" cluster.
 * We use head bitmap to identify lost chains: a head cluster that was
 * not claimed by any file or directory is the head cluster of a lost
 * chain.
 *
 * Handling of lost chains
 * =======================
72 * At the end of scanning, we can easily find all lost chain's heads
73 * by finding out the 1's in the head bitmap.
/*
 * Bitmap wide enough to hold one bit per cluster, with a cached
 * population count so the number of set bits is available in O(1).
 */
typedef struct long_bitmap {
        unsigned long *map;     /* bit array, LONG_BIT bits per word */
        size_t count;           /* Total set bits in the map */
} long_bitmap_t;
82 bitmap_clear(long_bitmap_t
*lbp
, cl_t cl
)
84 cl_t i
= cl
/ LONG_BIT
;
85 unsigned long clearmask
= ~(1UL << (cl
% LONG_BIT
));
87 assert((lbp
->map
[i
] & ~clearmask
) != 0);
88 lbp
->map
[i
] &= clearmask
;
93 bitmap_get(long_bitmap_t
*lbp
, cl_t cl
)
95 cl_t i
= cl
/ LONG_BIT
;
96 unsigned long usedbit
= 1UL << (cl
% LONG_BIT
);
98 return ((lbp
->map
[i
] & usedbit
) == usedbit
);
102 bitmap_none_in_range(long_bitmap_t
*lbp
, cl_t cl
)
104 cl_t i
= cl
/ LONG_BIT
;
106 return (lbp
->map
[i
] == 0);
110 bitmap_count(long_bitmap_t
*lbp
)
116 bitmap_ctor(long_bitmap_t
*lbp
, size_t bits
, bool allone
)
118 size_t bitmap_size
= roundup2(bits
, LONG_BIT
) / (LONG_BIT
/ 8);
121 lbp
->map
= calloc(1, bitmap_size
);
122 if (lbp
->map
== NULL
)
126 memset(lbp
->map
, 0xff, bitmap_size
);
135 bitmap_dtor(long_bitmap_t
*lbp
)
142 * FAT32 can be as big as 256MiB (2^26 entries * 4 bytes), when we
143 * can not ask the kernel to manage the access, use a simple LRU
144 * cache with chunk size of 128 KiB to manage it.
/* One LRU cache slot: a 128 KiB window of the on-disk FAT. */
struct fat32_cache_entry {
        TAILQ_ENTRY(fat32_cache_entry) entries;
        uint8_t *chunk;         /* pointer to chunk */
        off_t addr;             /* offset */
        bool dirty;             /* dirty bit */
};
/* LRU cache geometry: 32 chunks of 128 KiB, 4 MiB resident in total. */
static const size_t fat32_cache_chunk_size = 131072;    /* MAXPHYS */
static const size_t fat32_cache_size = 4194304;
static const size_t fat32_cache_entries = 32;   /* XXXgcc: cache_size / cache_chunk_size */
158 * FAT table descriptor, represents a FAT table that is already loaded
161 struct fat_descriptor
{
162 struct bootblock
*boot
;
164 cl_t (*get
)(struct fat_descriptor
*, cl_t
);
165 int (*set
)(struct fat_descriptor
*, cl_t
, cl_t
);
166 long_bitmap_t headbitmap
;
172 size_t fat32_cached_chunks
;
173 TAILQ_HEAD(cachehead
, fat32_cache_entry
) fat32_cache_head
;
174 struct fat32_cache_entry
*fat32_cache_allentries
;
176 off_t fat32_lastaddr
;
180 fat_clear_cl_head(struct fat_descriptor
*fat
, cl_t cl
)
182 bitmap_clear(&fat
->headbitmap
, cl
);
186 fat_is_cl_head(struct fat_descriptor
*fat
, cl_t cl
)
188 return (bitmap_get(&fat
->headbitmap
, cl
));
192 fat_is_cl_head_in_range(struct fat_descriptor
*fat
, cl_t cl
)
194 return (!(bitmap_none_in_range(&fat
->headbitmap
, cl
)));
198 fat_get_head_count(struct fat_descriptor
*fat
)
200 return (bitmap_count(&fat
->headbitmap
));
206 * FAT12s are sufficiently small, expect it to always fit in the RAM.
208 static inline uint8_t *
209 fat_get_fat12_ptr(struct fat_descriptor
*fat
, cl_t cl
)
211 return (fat
->fatbuf
+ ((cl
+ (cl
>> 1))));
/*
 * Decode the 12-bit "next cluster" value of 'cl', extending
 * reserved/bad/EOF markers to the full cl_t range.
 */
static cl_t
fat_get_fat12_next(struct fat_descriptor *fat, cl_t cl)
{
        const uint8_t *p;
        cl_t retval;

        p = fat_get_fat12_ptr(fat, cl);
        retval = le16dec(p);
        /* Odd cluster: lower 4 bits belongs to the subsequent cluster */
        if ((cl & 1) == 1)
                retval >>= 4;
        retval &= CLUST12_MASK;

        if (retval >= (CLUST_BAD & CLUST12_MASK))
                retval |= ~CLUST12_MASK;

        return (retval);
}
/*
 * Encode 'nextcl' as the 12-bit FAT entry of cluster 'cl', preserving
 * the 4 bits that belong to the neighboring entry.
 */
static int
fat_set_fat12_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
{
        uint8_t *p;

        /* Truncate 'nextcl' value, if needed */
        nextcl &= CLUST12_MASK;

        p = fat_get_fat12_ptr(fat, cl);

        /*
         * Read in the 4 bits from the subsequent (for even clusters)
         * or the preceding (for odd clusters) cluster and combine
         * it to the nextcl value for encoding
         */
        if ((cl & 1) == 0) {
                nextcl |= ((p[1] & 0xf0) << 8);
        } else {
                nextcl <<= 4;
                nextcl |= (p[0] & 0x0f);
        }

        le16enc(p, (uint16_t)nextcl);

        return (FSOK);
}
263 * FAT16s are sufficiently small, expect it to always fit in the RAM.
265 static inline uint8_t *
266 fat_get_fat16_ptr(struct fat_descriptor
*fat
, cl_t cl
)
268 return (fat
->fatbuf
+ (cl
<< 1));
/*
 * Decode the 16-bit "next cluster" value of 'cl', extending
 * reserved/bad/EOF markers to the full cl_t range.
 */
static cl_t
fat_get_fat16_next(struct fat_descriptor *fat, cl_t cl)
{
        const uint8_t *p;
        cl_t retval;

        p = fat_get_fat16_ptr(fat, cl);
        retval = le16dec(p) & CLUST16_MASK;

        if (retval >= (CLUST_BAD & CLUST16_MASK))
                retval |= ~CLUST16_MASK;

        return (retval);
}
/*
 * Encode 'nextcl' as the 16-bit FAT entry of cluster 'cl'.
 */
static int
fat_set_fat16_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
{
        uint8_t *p;

        /* Truncate 'nextcl' value, if needed */
        nextcl &= CLUST16_MASK;

        p = fat_get_fat16_ptr(fat, cl);
        le16enc(p, (uint16_t)nextcl);

        return (FSOK);
}
304 static inline uint8_t *
305 fat_get_fat32_ptr(struct fat_descriptor
*fat
, cl_t cl
)
307 return (fat
->fatbuf
+ (cl
<< 2));
/*
 * Decode the 28-bit "next cluster" value of 'cl' (in-memory FAT),
 * extending reserved/bad/EOF markers to the full cl_t range.
 */
static cl_t
fat_get_fat32_next(struct fat_descriptor *fat, cl_t cl)
{
        const uint8_t *p;
        cl_t retval;

        p = fat_get_fat32_ptr(fat, cl);
        retval = le32dec(p) & CLUST32_MASK;

        if (retval >= (CLUST_BAD & CLUST32_MASK))
                retval |= ~CLUST32_MASK;

        return (retval);
}
/*
 * Encode 'nextcl' as the FAT32 entry of cluster 'cl' (in-memory FAT).
 */
static int
fat_set_fat32_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
{
        uint8_t *p;

        /* Truncate 'nextcl' value, if needed */
        nextcl &= CLUST32_MASK;

        p = fat_get_fat32_ptr(fat, cl);
        le32enc(p, (uint32_t)nextcl);

        return (FSOK);
}
341 fat_get_iosize(struct fat_descriptor
*fat
, off_t address
)
344 if (address
== fat
->fat32_lastaddr
) {
345 return (fat
->fatsize
& ((off_t
)fat32_cache_chunk_size
- 1));
347 return (fat32_cache_chunk_size
);
352 fat_flush_fat32_cache_entry(struct fat_descriptor
*fat
,
353 struct fat32_cache_entry
*entry
)
364 writesize
= fat_get_iosize(fat
, entry
->addr
);
366 fat_addr
= fat
->fat32_offset
+ entry
->addr
;
367 if (lseek(fd
, fat_addr
, SEEK_SET
) != fat_addr
||
368 (size_t)write(fd
, entry
->chunk
, writesize
) != writesize
) {
369 pfatal("Unable to write FAT");
373 entry
->dirty
= false;
377 static struct fat32_cache_entry
*
378 fat_get_fat32_cache_entry(struct fat_descriptor
*fat
, off_t addr
,
382 struct fat32_cache_entry
*entry
, *first
;
386 addr
&= ~(fat32_cache_chunk_size
- 1);
388 first
= TAILQ_FIRST(&fat
->fat32_cache_head
);
391 * Cache hit: if we already have the chunk, move it to list head
393 TAILQ_FOREACH(entry
, &fat
->fat32_cache_head
, entries
) {
394 if (entry
->addr
== addr
) {
398 if (entry
!= first
) {
400 TAILQ_REMOVE(&fat
->fat32_cache_head
, entry
, entries
);
401 TAILQ_INSERT_HEAD(&fat
->fat32_cache_head
, entry
, entries
);
408 * Cache miss: detach the chunk at tail of list, overwrite with
409 * the located chunk, and populate with data from disk.
411 entry
= TAILQ_LAST(&fat
->fat32_cache_head
, cachehead
);
412 TAILQ_REMOVE(&fat
->fat32_cache_head
, entry
, entries
);
413 if (fat_flush_fat32_cache_entry(fat
, entry
) != FSOK
) {
417 rwsize
= fat_get_iosize(fat
, addr
);
418 fat_addr
= fat
->fat32_offset
+ addr
;
421 if (lseek(fd
, fat_addr
, SEEK_SET
) != fat_addr
||
422 (size_t)read(fd
, entry
->chunk
, rwsize
) != rwsize
) {
423 pfatal("Unable to read FAT");
429 TAILQ_INSERT_HEAD(&fat
->fat32_cache_head
, entry
, entries
);
434 static inline uint8_t *
435 fat_get_fat32_cached_ptr(struct fat_descriptor
*fat
, cl_t cl
, bool writing
)
438 struct fat32_cache_entry
*entry
;
441 entry
= fat_get_fat32_cache_entry(fat
, addr
, writing
);
444 off
= addr
& (fat32_cache_chunk_size
- 1);
445 return (entry
->chunk
+ off
);
/*
 * Decode the "next cluster" value of 'cl' through the LRU cache,
 * extending reserved/bad/EOF markers; CLUST_DEAD on cache failure.
 */
static cl_t
fat_get_fat32_cached_next(struct fat_descriptor *fat, cl_t cl)
{
        const uint8_t *p;
        cl_t retval;

        p = fat_get_fat32_cached_ptr(fat, cl, false);
        if (p != NULL) {
                retval = le32dec(p) & CLUST32_MASK;
                if (retval >= (CLUST_BAD & CLUST32_MASK))
                        retval |= ~CLUST32_MASK;
        } else {
                /* NOTE(review): failure value reconstructed — confirm */
                retval = CLUST_DEAD;
        }

        return (retval);
}
/*
 * Encode 'nextcl' as the FAT32 entry of cluster 'cl' through the LRU
 * cache.  Returns FSOK on success, FSFATAL on cache failure.
 */
static int
fat_set_fat32_cached_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
{
        uint8_t *p;

        /* Truncate 'nextcl' value, if needed */
        nextcl &= CLUST32_MASK;

        p = fat_get_fat32_cached_ptr(fat, cl, true);
        if (p != NULL) {
                le32enc(p, (uint32_t)nextcl);
                return (FSOK);
        } else {
                return (FSFATAL);
        }
}
487 cl_t
fat_get_cl_next(struct fat_descriptor
*fat
, cl_t cl
)
490 if (!valid_cl(fat
, cl
)) {
491 pfatal("Invalid cluster: %ud", cl
);
495 return (fat
->get(fat
, cl
));
498 int fat_set_cl_next(struct fat_descriptor
*fat
, cl_t cl
, cl_t nextcl
)
502 pwarn(" (NO WRITE)\n");
506 if (!valid_cl(fat
, cl
)) {
507 pfatal("Invalid cluster: %ud", cl
);
511 return (fat
->set(fat
, cl
, nextcl
));
514 static inline struct bootblock
*
515 boot_of_(struct fat_descriptor
*fat
) {
/*
 * Public wrapper around boot_of_().
 */
struct bootblock *
fat_get_boot(struct fat_descriptor *fat)
{

        return (boot_of_(fat));
}
527 fd_of_(struct fat_descriptor
*fat
)
/*
 * Public wrapper around fd_of_().
 */
int
fat_get_fd(struct fat_descriptor *fat)
{

        return (fd_of_(fat));
}
539 * Whether a cl is in valid data range.
542 fat_is_valid_cl(struct fat_descriptor
*fat
, cl_t cl
)
545 return (valid_cl(fat
, cl
));
549 valid_cl(struct fat_descriptor
*fat
, cl_t cl
)
551 const struct bootblock
*boot
= boot_of_(fat
);
553 return (cl
>= CLUST_FIRST
&& cl
< boot
->NumClusters
);
557 cleardirty(struct fat_descriptor
*fat
)
559 int fd
, ret
= FSERROR
;
560 struct bootblock
*boot
;
565 boot
= boot_of_(fat
);
568 if (boot
->ClustMask
!= CLUST16_MASK
&& boot
->ClustMask
!= CLUST32_MASK
)
571 off
= boot
->bpbResSectors
;
572 off
*= boot
->bpbBytesPerSec
;
574 buffer
= malloc(len
= boot
->bpbBytesPerSec
);
575 if (buffer
== NULL
) {
576 perr("No memory for FAT sectors (%zu)", len
);
580 if ((size_t)pread(fd
, buffer
, len
, off
) != len
) {
581 perr("Unable to read FAT");
585 if (boot
->ClustMask
== CLUST16_MASK
) {
591 if ((size_t)pwrite(fd
, buffer
, len
, off
) != len
) {
592 perr("Unable to write FAT");
604 * Read a FAT from disk. Returns 1 if successful, 0 otherwise.
607 _readfat(struct fat_descriptor
*fat
)
613 struct bootblock
*boot
;
614 struct fat32_cache_entry
*entry
;
616 boot
= boot_of_(fat
);
618 fat
->fatsize
= boot
->FATsecs
* boot
->bpbBytesPerSec
;
620 off
= boot
->bpbResSectors
;
621 off
*= boot
->bpbBytesPerSec
;
623 fat
->is_mmapped
= false;
624 fat
->use_cache
= false;
626 /* Attempt to mmap() first */
628 fat
->fatbuf
= mmap(NULL
, fat
->fatsize
,
629 PROT_READ
| (rdonly
? 0 : PROT_WRITE
),
630 MAP_SHARED
, fd_of_(fat
), off
);
631 if (fat
->fatbuf
!= MAP_FAILED
) {
632 fat
->is_mmapped
= true;
638 * Unfortunately, we were unable to mmap().
640 * Only use the cache manager when it's necessary, that is,
641 * when the FAT is sufficiently large; in that case, only
642 * read in the first 4 MiB of FAT into memory, and split the
643 * buffer into chunks and insert to the LRU queue to populate
644 * the cache with data.
646 if (boot
->ClustMask
== CLUST32_MASK
&&
647 fat
->fatsize
>= fat32_cache_size
) {
648 readsize
= fat32_cache_size
;
649 fat
->use_cache
= true;
651 fat
->fat32_offset
= boot
->bpbResSectors
* boot
->bpbBytesPerSec
;
652 fat
->fat32_lastaddr
= fat
->fatsize
& ~(fat32_cache_chunk_size
);
654 readsize
= fat
->fatsize
;
656 fat
->fatbuf
= malloc(readsize
);
657 if (fat
->fatbuf
== NULL
) {
658 perr("No space for FAT (%zu)", readsize
);
662 if (lseek(fd
, off
, SEEK_SET
) != off
) {
663 perr("Unable to read FAT");
666 if ((size_t)read(fd
, fat
->fatbuf
, readsize
) != readsize
) {
667 perr("Unable to read FAT");
672 * When cache is used, split the buffer into chunks, and
673 * connect the buffer into the cache.
675 if (fat
->use_cache
) {
676 TAILQ_INIT(&fat
->fat32_cache_head
);
677 entry
= calloc(fat32_cache_entries
, sizeof(*entry
));
679 perr("No space for FAT cache (%zu of %zu)",
680 fat32_cache_entries
, sizeof(entry
));
683 for (i
= 0; i
< fat32_cache_entries
; i
++) {
684 entry
[i
].addr
= fat32_cache_chunk_size
* i
;
685 entry
[i
].chunk
= &fat
->fatbuf
[entry
[i
].addr
];
686 TAILQ_INSERT_TAIL(&fat
->fat32_cache_head
,
689 fat
->fat32_cache_allentries
= entry
;
701 releasefat(struct fat_descriptor
*fat
)
703 if (fat
->is_mmapped
) {
704 munmap(fat
->fatbuf
, fat
->fatsize
);
706 if (fat
->use_cache
) {
707 free(fat
->fat32_cache_allentries
);
708 fat
->fat32_cache_allentries
= NULL
;
713 bitmap_dtor(&fat
->headbitmap
);
717 * Read or map a FAT and populate head bitmap
720 readfat(int fs
, struct bootblock
*boot
, struct fat_descriptor
**fp
)
722 struct fat_descriptor
*fat
;
727 boot
->NumFree
= boot
->NumBad
= 0;
729 fat
= calloc(1, sizeof(struct fat_descriptor
));
731 perr("No space for FAT descriptor");
738 if (!_readfat(fat
)) {
742 buffer
= fat
->fatbuf
;
744 /* Populate accessors */
745 switch(boot
->ClustMask
) {
747 fat
->get
= fat_get_fat12_next
;
748 fat
->set
= fat_set_fat12_next
;
751 fat
->get
= fat_get_fat16_next
;
752 fat
->set
= fat_set_fat16_next
;
755 if (fat
->is_mmapped
|| !fat
->use_cache
) {
756 fat
->get
= fat_get_fat32_next
;
757 fat
->set
= fat_set_fat32_next
;
759 fat
->get
= fat_get_fat32_cached_next
;
760 fat
->set
= fat_set_fat32_cached_next
;
764 pfatal("Invalid ClustMask: %d", boot
->ClustMask
);
770 if (bitmap_ctor(&fat
->headbitmap
, boot
->NumClusters
,
772 perr("No space for head bitmap for FAT clusters (%zu)",
773 (size_t)boot
->NumClusters
);
779 if (buffer
[0] != boot
->bpbMedia
780 || buffer
[1] != 0xff || buffer
[2] != 0xff
781 || (boot
->ClustMask
== CLUST16_MASK
&& buffer
[3] != 0xff)
782 || (boot
->ClustMask
== CLUST32_MASK
783 && ((buffer
[3]&0x0f) != 0x0f
784 || buffer
[4] != 0xff || buffer
[5] != 0xff
785 || buffer
[6] != 0xff || (buffer
[7]&0x0f) != 0x0f))) {
787 /* Windows 95 OSR2 (and possibly any later) changes
788 * the FAT signature to 0xXXffff7f for FAT16 and to
789 * 0xXXffff0fffffff07 for FAT32 upon boot, to know that the
790 * file system is dirty if it doesn't reboot cleanly.
791 * Check this special condition before errorring out.
793 if (buffer
[0] == boot
->bpbMedia
&& buffer
[1] == 0xff
795 && ((boot
->ClustMask
== CLUST16_MASK
&& buffer
[3] == 0x7f)
796 || (boot
->ClustMask
== CLUST32_MASK
797 && buffer
[3] == 0x0f && buffer
[4] == 0xff
798 && buffer
[5] == 0xff && buffer
[6] == 0xff
799 && buffer
[7] == 0x07)))
802 /* just some odd byte sequence in FAT */
803 switch (boot
->ClustMask
) {
805 pwarn("%s (%02x%02x%02x%02x%02x%02x%02x%02x)\n",
806 "FAT starts with odd byte sequence",
807 buffer
[0], buffer
[1], buffer
[2], buffer
[3],
808 buffer
[4], buffer
[5], buffer
[6], buffer
[7]);
811 pwarn("%s (%02x%02x%02x%02x)\n",
812 "FAT starts with odd byte sequence",
813 buffer
[0], buffer
[1], buffer
[2], buffer
[3]);
816 pwarn("%s (%02x%02x%02x)\n",
817 "FAT starts with odd byte sequence",
818 buffer
[0], buffer
[1], buffer
[2]);
822 if (ask(1, "Correct")) {
826 *p
++ = (u_char
)boot
->bpbMedia
;
829 switch (boot
->ClustMask
) {
848 * Traverse the FAT table and populate head map. Initially, we
849 * consider all clusters as possible head cluster (beginning of
850 * a file or directory), and traverse the whole allocation table
851 * by marking every non-head nodes as such (detailed below) and
852 * fix obvious issues while we walk.
854 * For each "next" cluster, the possible values are:
856 * a) CLUST_FREE or CLUST_BAD. The *current* cluster can't be a
858 * b) An out-of-range value. The only fix would be to truncate at
860 * c) A valid cluster. It means that cluster (nextcl) is not a
861 * head cluster. Note that during the scan, every cluster is
862 * expected to be seen for at most once, and when we saw them
863 * twice, it means a cross-linked chain which should be
864 * truncated at the current cluster.
866 * After scan, the remaining set bits indicates all possible
867 * head nodes, because they were never claimed by any other
868 * node as the next node, but we do not know if these chains
869 * would end with a valid EOF marker. We will check that in
870 * checkchain() at a later time when checking directories,
871 * where these head nodes would be marked as non-head.
873 * In the final pass, all head nodes should be cleared, and if
874 * there is still head nodes, these would be leaders of lost
877 for (cl
= CLUST_FIRST
; cl
< boot
->NumClusters
; cl
++) {
878 nextcl
= fat_get_cl_next(fat
, cl
);
880 /* Check if the next cluster number is valid */
881 if (nextcl
== CLUST_FREE
) {
882 /* Save a hint for next free cluster */
883 if (boot
->FSNext
== 0) {
886 if (fat_is_cl_head(fat
, cl
)) {
887 fat_clear_cl_head(fat
, cl
);
890 } else if (nextcl
== CLUST_BAD
) {
891 if (fat_is_cl_head(fat
, cl
)) {
892 fat_clear_cl_head(fat
, cl
);
895 } else if (!valid_cl(fat
, nextcl
) && nextcl
< CLUST_RSRVD
) {
896 pwarn("Cluster %u continues with out of range "
897 "cluster number %u\n",
899 nextcl
& boot
->ClustMask
);
900 if (ask(0, "Truncate")) {
901 ret
|= fat_set_cl_next(fat
, cl
, CLUST_EOF
);
904 } else if (valid_cl(fat
, nextcl
)) {
905 if (fat_is_cl_head(fat
, nextcl
)) {
906 fat_clear_cl_head(fat
, nextcl
);
908 pwarn("Cluster %u crossed another chain at %u\n",
910 if (ask(0, "Truncate")) {
911 ret
|= fat_set_cl_next(fat
, cl
, CLUST_EOF
);
929 * Get type of reserved cluster
934 if (cl
== CLUST_FREE
)
944 * Examine a cluster chain for errors and count its size.
947 checkchain(struct fat_descriptor
*fat
, cl_t head
, size_t *chainsize
)
949 cl_t prev_cl
, current_cl
, next_cl
;
953 * We expect that the caller to give us a real, unvisited 'head'
954 * cluster, and it must be a valid cluster. While scanning the
955 * FAT table, we already excluded all clusters that was claimed
956 * as a "next" cluster. Assert all the three conditions.
958 assert(valid_cl(fat
, head
));
959 assert(fat_is_cl_head(fat
, head
));
962 * Immediately mark the 'head' cluster that we are about to visit.
964 fat_clear_cl_head(fat
, head
);
967 * The allocation of a non-zero sized file or directory is
968 * represented as a singly linked list, and the tail node
969 * would be the EOF marker (>=CLUST_EOFS).
971 * With a valid head node at hand, we expect all subsequent
972 * cluster to be either a not yet seen and valid cluster (we
973 * would continue counting), or the EOF marker (we conclude
974 * the scan of this chain).
976 * For all other cases, the chain is invalid, and the only
977 * viable fix would be to truncate at the current node (mark
978 * it as EOF) when the next node violates that.
981 prev_cl
= current_cl
= head
;
982 for (next_cl
= fat_get_cl_next(fat
, current_cl
);
983 valid_cl(fat
, next_cl
);
984 prev_cl
= current_cl
, current_cl
= next_cl
, next_cl
= fat_get_cl_next(fat
, current_cl
))
988 if (next_cl
>= CLUST_EOFS
) {
994 * The chain ended with an out-of-range cluster number.
996 * If the current node is e.g. CLUST_FREE, CLUST_BAD, etc.,
997 * it should not be present in a chain and we has to truncate
998 * at the previous node.
1000 * If the current cluster points to an invalid cluster, the
1001 * current cluster might have useful data and we truncate at
1002 * the current cluster instead.
1004 if (next_cl
== CLUST_FREE
|| next_cl
>= CLUST_RSRVD
) {
1005 pwarn("Cluster chain starting at %u ends with cluster marked %s\n",
1006 head
, rsrvdcltype(next_cl
));
1007 current_cl
= prev_cl
;
1009 pwarn("Cluster chain starting at %u ends with cluster out of range (%u)\n",
1011 next_cl
& boot_of_(fat
)->ClustMask
);
1015 if (*chainsize
> 0) {
1017 next_cl
= CLUST_EOF
;
1020 next_cl
= CLUST_FREE
;
1022 if (ask(0, "%s", op
)) {
1023 return (fat_set_cl_next(fat
, current_cl
, next_cl
) | FSFATMOD
);
1030 * Clear cluster chain from head.
1033 clearchain(struct fat_descriptor
*fat
, cl_t head
)
1035 cl_t current_cl
, next_cl
;
1036 struct bootblock
*boot
= boot_of_(fat
);
1040 while (valid_cl(fat
, current_cl
)) {
1041 next_cl
= fat_get_cl_next(fat
, current_cl
);
1042 (void)fat_set_cl_next(fat
, current_cl
, CLUST_FREE
);
1044 current_cl
= next_cl
;
1049 * Overwrite the n-th FAT with FAT0
1052 copyfat(struct fat_descriptor
*fat
, int n
)
1054 size_t rwsize
, tailsize
, blobs
, i
;
1055 off_t dst_off
, src_off
;
1056 struct bootblock
*boot
;
1061 boot
= boot_of_(fat
);
1063 blobs
= howmany(fat
->fatsize
, fat32_cache_size
);
1064 tailsize
= fat
->fatsize
% fat32_cache_size
;
1065 if (tailsize
== 0) {
1066 tailsize
= fat32_cache_size
;
1068 rwsize
= fat32_cache_size
;
1070 src_off
= fat
->fat32_offset
;
1071 dst_off
= boot
->bpbResSectors
+ n
* boot
->FATsecs
;
1072 dst_off
*= boot
->bpbBytesPerSec
;
1074 for (i
= 0; i
< blobs
;
1075 i
++, src_off
+= fat32_cache_size
, dst_off
+= fat32_cache_size
) {
1076 if (i
== blobs
- 1) {
1079 if ((lseek(fd
, src_off
, SEEK_SET
) != src_off
||
1080 (size_t)read(fd
, fat
->fatbuf
, rwsize
) != rwsize
) &&
1082 perr("Unable to read FAT0");
1086 if ((lseek(fd
, dst_off
, SEEK_SET
) != dst_off
||
1087 (size_t)write(fd
, fat
->fatbuf
, rwsize
) != rwsize
) &&
1089 perr("Unable to write FAT %d", n
);
1100 writefat(struct fat_descriptor
*fat
)
1106 struct bootblock
*boot
;
1107 struct fat32_cache_entry
*entry
;
1109 boot
= boot_of_(fat
);
1112 if (fat
->use_cache
) {
1114 * Attempt to flush all in-flight cache, and bail out
1115 * if we encountered an error (but only emit error
1116 * message once). Stop proceeding with copyfat()
1117 * if any flush failed.
1119 TAILQ_FOREACH(entry
, &fat
->fat32_cache_head
, entries
) {
1120 if (fat_flush_fat32_cache_entry(fat
, entry
) != FSOK
) {
1122 perr("Unable to write FAT");
1130 /* Update backup copies of FAT, error is not fatal */
1131 for (i
= 1; i
< boot
->bpbFATs
; i
++) {
1132 if (copyfat(fat
, i
) != FSOK
)
1136 writesz
= fat
->fatsize
;
1138 for (i
= fat
->is_mmapped
? 1 : 0; i
< boot
->bpbFATs
; i
++) {
1139 dst_base
= boot
->bpbResSectors
+ i
* boot
->FATsecs
;
1140 dst_base
*= boot
->bpbBytesPerSec
;
1141 if ((lseek(fd
, dst_base
, SEEK_SET
) != dst_base
||
1142 (size_t)write(fd
, fat
->fatbuf
, writesz
) != writesz
) &&
1144 perr("Unable to write FAT %d", i
);
1145 ret
= ((i
== 0) ? FSFATAL
: FSERROR
);
1154 * Check a complete in-memory FAT for lost cluster chains
1157 checklost(struct fat_descriptor
*fat
)
1162 size_t chains
, chainlength
;
1163 struct bootblock
*boot
;
1165 dosfs
= fd_of_(fat
);
1166 boot
= boot_of_(fat
);
1169 * At this point, we have already traversed all directories.
1170 * All remaining chain heads in the bitmap are heads of lost
1173 chains
= fat_get_head_count(fat
);
1174 for (head
= CLUST_FIRST
;
1175 chains
> 0 && head
< boot
->NumClusters
;
1178 * We expect the bitmap to be very sparse, so skip if
1179 * the range is full of 0's
1181 if (head
% LONG_BIT
== 0 &&
1182 !fat_is_cl_head_in_range(fat
, head
)) {
1186 if (fat_is_cl_head(fat
, head
)) {
1187 ret
= checkchain(fat
, head
, &chainlength
);
1188 if (ret
!= FSERROR
&& chainlength
> 0) {
1189 pwarn("Lost cluster chain at cluster %u\n"
1190 "%zd Cluster(s) lost\n",
1192 mod
|= ret
= reconnect(fat
, head
,
1197 if (ret
== FSERROR
&& ask(0, "Clear")) {
1198 clearchain(fat
, head
);
1208 if (boot
->bpbFSInfo
) {
1210 if (boot
->FSFree
!= 0xffffffffU
&&
1211 boot
->FSFree
!= boot
->NumFree
) {
1212 pwarn("Free space in FSInfo block (%u) not correct (%u)\n",
1213 boot
->FSFree
, boot
->NumFree
);
1214 if (ask(1, "Fix")) {
1215 boot
->FSFree
= boot
->NumFree
;
1219 if (boot
->FSNext
!= 0xffffffffU
&&
1220 (boot
->FSNext
>= boot
->NumClusters
||
1221 (boot
->NumFree
&& fat_get_cl_next(fat
, boot
->FSNext
) != CLUST_FREE
))) {
1222 pwarn("Next free cluster in FSInfo block (%u) %s\n",
1224 (boot
->FSNext
>= boot
->NumClusters
) ? "invalid" : "not free");
1226 for (head
= CLUST_FIRST
; head
< boot
->NumClusters
; head
++)
1227 if (fat_get_cl_next(fat
, head
) == CLUST_FREE
) {
1228 boot
->FSNext
= head
;
1234 mod
|= writefsinfo(dosfs
, boot
);