4 * Written 1992,1993 by Werner Almesberger
6 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
8 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
12 #include <linux/msdos_fs.h>
13 #include <linux/buffer_head.h>
/*
 * FAT chain cache: a statically allocated pool of FAT_CACHE entries.
 * 'cache' is the backing array; 'fat_cache' points at the head of the
 * list threaded through it (wired up in fat_cache_init()).
 */
static struct fat_cache
*fat_cache
,cache
[FAT_CACHE
];
/* Protects fat_cache, the list links, and every entry's fields. */
static spinlock_t fat_cache_lock
= SPIN_LOCK_UNLOCKED
;
/*
 * __fat_access - read one FAT entry, and optionally overwrite it.
 * @sb:        superblock of the mounted FAT filesystem
 * @nr:        cluster number whose FAT entry is accessed
 * @new_value: replacement entry value, or -1 for a read-only access
 *
 * NOTE(review): interior lines (locking, return statements, the byte
 * offset setup for first/last, and several branch bodies) are missing
 * from this extract; the comments below describe only what is visible.
 */
18 int __fat_access(struct super_block
*sb
, int nr
, int new_value
)
/* Per-superblock FAT parameters (fat_bits, fat_start, fats, ...). */
20 struct msdos_sb_info
*sbi
= MSDOS_SB(sb
);
/* bh/bh2: block(s) holding the entry; c_bh/c_bh2: mirror-FAT blocks. */
21 struct buffer_head
*bh
, *bh2
, *c_bh
, *c_bh2
;
/* FAT12 only: the two bytes a 12-bit entry is packed into. */
22 unsigned char *p_first
, *p_last
;
23 int copy
, first
, last
, next
, b
;
/* first/last are byte offsets of the entry, chosen per fat_bits
   (the assignments themselves are not visible in this extract). */
25 if (sbi
->fat_bits
== 32) {
27 } else if (sbi
->fat_bits
== 16) {
/* Device block that contains the first byte of the entry. */
33 b
= sbi
->fat_start
+ (first
>> sb
->s_blocksize_bits
);
34 if (!(bh
= sb_bread(sb
, b
))) {
35 printk(KERN_ERR
"FAT: bread(block %d) in"
36 " fat_access failed\n", b
);
/* If first and last fall in different blocks the entry straddles a
   block boundary (can only happen for 12-bit entries), and the next
   block is read into bh2. */
39 if ((first
>> sb
->s_blocksize_bits
) == (last
>> sb
->s_blocksize_bits
)) {
42 if (!(bh2
= sb_bread(sb
, b
+ 1))) {
44 printk(KERN_ERR
"FAT: bread(block %d) in"
45 " fat_access failed\n", b
+ 1);
/* Decode the current entry into 'next'; entries are little-endian on
   disk (CF_LE_L / CF_LE_W convert to CPU order). */
49 if (sbi
->fat_bits
== 32) {
50 p_first
= p_last
= NULL
; /* GCC needs that stuff */
51 next
= CF_LE_L(((__u32
*) bh
->b_data
)[(first
&
52 (sb
->s_blocksize
- 1)) >> 2]);
53 /* Fscking Microsoft marketing department. Their "32" is 28. */
55 } else if (sbi
->fat_bits
== 16) {
56 p_first
= p_last
= NULL
; /* GCC needs that stuff */
57 next
= CF_LE_W(((__u16
*) bh
->b_data
)[(first
&
58 (sb
->s_blocksize
- 1)) >> 1]);
/* FAT12: the entry occupies two consecutive bytes (possibly split
   across bh and bh2); which nibble layout applies depends on a
   condition not visible in this extract. */
60 p_first
= &((__u8
*)bh
->b_data
)[first
& (sb
->s_blocksize
- 1)];
61 p_last
= &((__u8
*)bh2
->b_data
)[(first
+ 1) & (sb
->s_blocksize
- 1)];
63 next
= ((*p_first
>> 4) | (*p_last
<< 4)) & 0xfff;
65 next
= (*p_first
+(*p_last
<< 8)) & 0xfff;
/* Write path: store new_value back in the same on-disk layout and
   dirty the buffer(s) so the change reaches the device. */
67 if (new_value
!= -1) {
68 if (sbi
->fat_bits
== 32) {
69 ((__u32
*)bh
->b_data
)[(first
& (sb
->s_blocksize
- 1)) >> 2]
71 } else if (sbi
->fat_bits
== 16) {
72 ((__u16
*)bh
->b_data
)[(first
& (sb
->s_blocksize
- 1)) >> 1]
/* FAT12 store: merge new_value's nibbles with the neighbouring
   entry's bits that share the same bytes. */
76 *p_first
= (*p_first
& 0xf) | (new_value
<< 4);
77 *p_last
= new_value
>> 4;
80 *p_first
= new_value
& 0xff;
81 *p_last
= (*p_last
& 0xf0) | (new_value
>> 8);
83 mark_buffer_dirty(bh2
);
85 mark_buffer_dirty(bh
);
/* Mirror the updated block(s) into each additional FAT copy; copy
   'copy' starts fat_length blocks after the previous one. */
86 for (copy
= 1; copy
< sbi
->fats
; copy
++) {
87 b
= sbi
->fat_start
+ (first
>> sb
->s_blocksize_bits
)
88 + sbi
->fat_length
* copy
;
89 if (!(c_bh
= sb_bread(sb
, b
)))
92 if (!(c_bh2
= sb_bread(sb
, b
+1))) {
96 memcpy(c_bh2
->b_data
, bh2
->b_data
, sb
->s_blocksize
);
97 mark_buffer_dirty(c_bh2
);
100 memcpy(c_bh
->b_data
, bh
->b_data
, sb
->s_blocksize
);
101 mark_buffer_dirty(c_bh
);
112 * Returns the nr'th FAT entry, or -1 if it is an end-of-file entry. If
113 * new_value is != -1, that FAT entry is replaced by it.
/*
 * fat_access - range-checked wrapper around __fat_access().
 * Panics the filesystem on an entry number outside the valid data
 * cluster range, converts the generic FAT_ENT_EOF marker to this
 * volume's on-disk EOF value before writing, and (per the visible
 * tail) treats entries >= BAD_FAT(sb) specially.
 * NOTE(review): declarations, branch bodies and the return statements
 * are missing from this extract.
 */
115 int fat_access(struct super_block
*sb
, int nr
, int new_value
)
/* Valid data clusters are 2 .. clusters+1; anything else means a
   corrupt chain or a caller bug. */
120 if (nr
< 2 || MSDOS_SB(sb
)->clusters
+ 2 <= nr
) {
121 fat_fs_panic(sb
, "invalid access to FAT (entry 0x%08x)", nr
);
/* Callers pass the width-independent FAT_ENT_EOF; translate it to the
   width-specific on-disk end-of-file marker. */
124 if (new_value
== FAT_ENT_EOF
)
125 new_value
= EOF_FAT(sb
);
127 next
= __fat_access(sb
, nr
, new_value
);
/* Bad-cluster and EOF markers all compare >= BAD_FAT(sb). */
130 if (next
>= BAD_FAT(sb
))
/*
 * fat_cache_init - one-time setup of the static FAT cache.
 * The static 'initialized' flag, checked under fat_cache_lock, makes
 * repeated calls (e.g. from concurrent mounts) a no-op after the first.
 * Links cache[0..FAT_CACHE-1] into a NULL-terminated list headed by
 * fat_cache, with every entry's sb cleared (i.e. marked unused).
 * NOTE(review): the early-return body and parts of the loop body are
 * missing from this extract.
 */
136 void fat_cache_init(void)
138 static int initialized
;
141 spin_lock(&fat_cache_lock
);
/* (already-initialized early-out path; unlock before returning) */
143 spin_unlock(&fat_cache_lock
);
146 fat_cache
= &cache
[0];
147 for (count
= 0; count
< FAT_CACHE
; count
++) {
148 cache
[count
].sb
= NULL
;
/* Chain each entry to its successor; the last one ends the list. */
149 cache
[count
].next
= count
== FAT_CACHE
-1 ? NULL
:
153 spin_unlock(&fat_cache_lock
);
/*
 * fat_cache_lookup - find the closest cached file->disk cluster pair.
 * @inode:   file whose cluster chain is being walked
 * @cluster: wanted file-relative cluster index
 * @f_clu:   in/out: highest file cluster resolved so far
 * @d_clu:   out: disk cluster corresponding to *f_clu
 *
 * Scans the cache for entries belonging to this chain (matched by
 * superblock and starting cluster) with file_cluster <= cluster but
 * past the caller's current *f_clu, advancing *f_clu/*d_clu toward
 * 'cluster'.  On an exact hit the lock is dropped and (per the missing
 * lines, presumably) the function returns early.
 */
157 void fat_cache_lookup(struct inode
*inode
,int cluster
,int *f_clu
,int *d_clu
)
159 struct fat_cache
*walk
;
/* Chains are identified by their starting cluster, not by inode. */
160 int first
= MSDOS_I(inode
)->i_start
;
164 spin_lock(&fat_cache_lock
);
165 for (walk
= fat_cache
; walk
; walk
= walk
->next
)
166 if (inode
->i_sb
== walk
->sb
167 && walk
->start_cluster
== first
168 && walk
->file_cluster
<= cluster
169 && walk
->file_cluster
> *f_clu
) {
170 *d_clu
= walk
->disk_cluster
;
/* Debug trace — presumably compiled conditionally; guard not visible. */
172 printk("cache hit: %d (%d)\n",walk
->file_cluster
,*d_clu
);
174 if ((*f_clu
= walk
->file_cluster
) == cluster
) {
175 spin_unlock(&fat_cache_lock
);
179 spin_unlock(&fat_cache_lock
);
181 printk("cache miss\n");
/*
 * list_cache - debug helper: dump every cache entry to the log as
 * <sb-id,start_cluster>(file_cluster,disk_cluster).
 * NOTE(review): the tail of this function (last printk argument and
 * closing lines) is not visible in this extract.
 */
187 static void list_cache(void)
189 struct fat_cache
*walk
;
191 for (walk
= fat_cache
; walk
; walk
= walk
->next
) {
193 printk("<%s,%d>(%d,%d) ", walk
->sb
->s_id
,
194 walk
->start_cluster
, walk
->file_cluster
,
/*
 * fat_cache_add - record the mapping file cluster f_clu -> disk cluster
 * d_clu for the chain starting at this inode's i_start.
 * If an entry for (sb, start, f_clu) already exists with a different
 * disk cluster the cache is inconsistent: report it and invalidate all
 * of this inode's entries.  A found (or recycled last) entry is moved
 * to the head of the list; 'last' trails 'walk' by one step so the
 * entry can be unlinked.
 * NOTE(review): several lines (returns, unlink of the recycled tail
 * entry, closing braces) are missing from this extract.
 */
203 void fat_cache_add(struct inode
*inode
,int f_clu
,int d_clu
)
205 struct fat_cache
*walk
,*last
;
206 int first
= MSDOS_I(inode
)->i_start
;
209 spin_lock(&fat_cache_lock
);
/* Stops at the last element: walk->next is the loop condition, so the
   final entry is the recycling candidate when no match is found. */
210 for (walk
= fat_cache
; walk
->next
; walk
= (last
= walk
)->next
)
211 if (inode
->i_sb
== walk
->sb
212 && walk
->start_cluster
== first
213 && walk
->file_cluster
== f_clu
) {
/* Same key, different value: the cache disagrees with the caller. */
214 if (walk
->disk_cluster
!= d_clu
) {
215 printk(KERN_ERR
"FAT: cache corruption"
216 " (ino %lu)\n", inode
->i_ino
);
217 spin_unlock(&fat_cache_lock
);
218 fat_cache_inval_inode(inode
);
223 spin_unlock(&fat_cache_lock
);
/* Unlink 'walk' and push it to the front of the list. */
226 last
->next
= walk
->next
;
227 walk
->next
= fat_cache
;
232 spin_unlock(&fat_cache_lock
);
/* (Re)fill the entry with the new mapping. */
235 walk
->sb
= inode
->i_sb
;
236 walk
->start_cluster
= first
;
237 walk
->file_cluster
= f_clu
;
238 walk
->disk_cluster
= d_clu
;
240 walk
->next
= fat_cache
;
242 spin_unlock(&fat_cache_lock
);
249 /* Cache invalidation occurs rarely, so the LRU chain is not updated. It
250 fixes itself after a while. */
/*
 * fat_cache_inval_inode - forget every cached mapping for this inode's
 * cluster chain, matched by superblock and starting cluster.
 * Per the comment above, the LRU order is deliberately left alone.
 * NOTE(review): the statement executed for each matching entry is not
 * visible in this extract.
 */
252 void fat_cache_inval_inode(struct inode
*inode
)
254 struct fat_cache
*walk
;
255 int first
= MSDOS_I(inode
)->i_start
;
257 spin_lock(&fat_cache_lock
);
258 for (walk
= fat_cache
; walk
; walk
= walk
->next
)
259 if (walk
->sb
== inode
->i_sb
260 && walk
->start_cluster
== first
)
262 spin_unlock(&fat_cache_lock
);
/*
 * fat_cache_inval_dev - forget every cached mapping that belongs to the
 * given superblock (e.g. when the filesystem is unmounted).
 * NOTE(review): the match test and invalidation statement inside the
 * loop are not visible in this extract.
 */
266 void fat_cache_inval_dev(struct super_block
*sb
)
268 struct fat_cache
*walk
;
270 spin_lock(&fat_cache_lock
);
271 for (walk
= fat_cache
; walk
; walk
= walk
->next
)
274 spin_unlock(&fat_cache_lock
);
/*
 * fat_get_cluster - resolve the file-relative 'cluster' index into an
 * absolute disk cluster number for 'inode'.
 * Returns 0 for an inode with no starting cluster, and the starting
 * cluster itself for index 0.  Otherwise walks the FAT chain with
 * fat_access(), seeded by fat_cache_lookup() so the walk can start
 * from the nearest cached position instead of the chain head.  Panics
 * on a chain that ends early (FAT_ENT_EOF before 'cluster' is reached)
 * or that points at a free entry.  The resolved mapping is cached via
 * fat_cache_add().
 * NOTE(review): the loop condition/increment and return statements are
 * missing from this extract.
 */
278 static int fat_get_cluster(struct inode
*inode
, int cluster
)
280 struct super_block
*sb
= inode
->i_sb
;
283 if (!(nr
= MSDOS_I(inode
)->i_start
)) return 0;
284 if (!cluster
) return nr
;
/* count/nr start at the best cached position at or below 'cluster'. */
286 for (fat_cache_lookup(inode
, cluster
, &count
, &nr
);
289 nr
= fat_access(sb
, nr
, -1);
290 if (nr
== FAT_ENT_EOF
) {
291 fat_fs_panic(sb
, "%s: request beyond EOF (ino %lu)",
292 __FUNCTION__
, inode
->i_ino
);
294 } else if (nr
== FAT_ENT_FREE
) {
295 fat_fs_panic(sb
, "%s: invalid cluster chain (ino %lu)",
296 __FUNCTION__
, inode
->i_ino
);
301 fat_cache_add(inode
, cluster
, nr
);
/*
 * fat_bmap - translate a file-relative block number ('sector') into an
 * absolute device block number.
 * FAT12/16 keep the root directory in a fixed on-disk area, so the
 * root inode (and directories with no starting cluster) map directly
 * from dir_start, bounded by the number of root directory entries.
 * Regular inodes are bounded by mmu_private (the allocated size,
 * rounded up to a whole block); the containing cluster is then
 * resolved with fat_get_cluster() and the final block computed from
 * data_start (on-disk clusters are numbered from 2).
 * NOTE(review): the out-of-range return statements and the handling of
 * a failed fat_get_cluster() are not visible in this extract.
 */
305 int fat_bmap(struct inode
*inode
, int sector
)
307 struct super_block
*sb
= inode
->i_sb
;
308 struct msdos_sb_info
*sbi
= MSDOS_SB(sb
);
309 int cluster
, offset
, last_block
;
311 if ((sbi
->fat_bits
!= 32) &&
312 (inode
->i_ino
== MSDOS_ROOT_INO
|| (S_ISDIR(inode
->i_mode
) &&
313 !MSDOS_I(inode
)->i_start
))) {
314 if (sector
>= sbi
->dir_entries
>> sbi
->dir_per_block_bits
)
316 return sector
+ sbi
->dir_start
;
318 last_block
= (MSDOS_I(inode
)->mmu_private
+ (sb
->s_blocksize
- 1))
319 >> sb
->s_blocksize_bits
;
320 if (sector
>= last_block
)
/* Split the request into a cluster index and a block offset inside it. */
323 cluster
= sector
/ sbi
->cluster_size
;
324 offset
= sector
% sbi
->cluster_size
;
325 cluster
= fat_get_cluster(inode
, cluster
);
330 return (cluster
- 2) * sbi
->cluster_size
+ sbi
->data_start
+ offset
;
334 /* Free all clusters after the skip'th cluster. The cache is deliberately
335 not used, so the chain walk doubles as an additional sanity check. */
/*
 * fat_free - truncate the inode's cluster chain, releasing everything
 * after the skip'th cluster.
 * Returns 0 immediately for an inode with no chain.  Visible logic:
 * walk forward with fat_access() (panicking if a free entry appears
 * inside the chain), then either mark the new last kept cluster with
 * FAT_ENT_EOF or — when nothing is kept — clear i_start/i_logstart and
 * dirty the inode; in both cases the inode's cache entries are
 * invalidated.  The remainder of the chain is then freed entry by
 * entry, crediting free_clusters (when the count is tracked, i.e.
 * != -1), shrinking i_blocks by one cluster's worth of 512-byte
 * blocks per freed cluster, and finally flushing the cluster
 * bookkeeping with fat_clusters_flush().
 * NOTE(review): loop bounds, several branches, and the function's
 * return are missing from this extract.
 */
337 int fat_free(struct inode
*inode
,int skip
)
339 struct super_block
*sb
= inode
->i_sb
;
342 if (!(nr
= MSDOS_I(inode
)->i_start
)) return 0;
/* Advance to the first cluster to be freed. */
346 nr
= fat_access(sb
, nr
, -1);
347 if (nr
== FAT_ENT_EOF
)
349 else if (nr
== FAT_ENT_FREE
) {
350 fat_fs_panic(sb
, "%s: invalid cluster chain (ino %lu)",
351 __FUNCTION__
, inode
->i_ino
);
/* Terminate the kept part of the chain at 'last'. */
357 fat_access(sb
, last
, FAT_ENT_EOF
);
358 fat_cache_inval_inode(inode
);
/* Nothing kept: detach the whole chain from the inode. */
360 fat_cache_inval_inode(inode
);
361 MSDOS_I(inode
)->i_start
= 0;
362 MSDOS_I(inode
)->i_logstart
= 0;
363 mark_inode_dirty(inode
);
/* Free the rest of the chain; writing FAT_ENT_FREE returns the old
   entry, i.e. the next cluster to free. */
367 while (nr
!= FAT_ENT_EOF
) {
368 nr
= fat_access(sb
, nr
, FAT_ENT_FREE
);
371 else if (nr
== FAT_ENT_FREE
) {
372 fat_fs_panic(sb
, "%s: deleting beyond EOF (ino %lu)",
373 __FUNCTION__
, inode
->i_ino
);
/* free_clusters == -1 means the free count is not being tracked. */
377 if (MSDOS_SB(sb
)->free_clusters
!= -1)
378 MSDOS_SB(sb
)->free_clusters
++;
/* i_blocks is in 512-byte units; one cluster is cluster_bits bytes. */
379 inode
->i_blocks
-= (1 << MSDOS_SB(sb
)->cluster_bits
) >> 9;
381 fat_clusters_flush(sb
);