/*
 * Copyright © 2010, The AROS Development Team. All rights reserved.
 */

/*
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number (a worked example of this
 * mapping follows this comment).
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking up
 * the hash table. Each position in the hash table holds a list containing
 * all cache blocks (henceforth referred to as a block: individual disk
 * blocks are not used internally) that map to that hash location. Each
 * list in the hash table is typically very short, so look-up time is low.
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * is found to be empty when a new block is needed.
 */
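
/*
 * Worked example of the range mapping (a sketch only; it assumes RANGE_SHIFT
 * is 5, so RANGE_SIZE is 32 blocks and RANGE_MASK is 0x1F; the real value of
 * RANGE_SHIFT is defined elsewhere in the handler and may differ):
 *
 *   requested block      0x1234
 *   range base           0x1234 & ~RANGE_MASK = 0x1220
 *   offset within range  0x1234 &  RANGE_MASK = 0x14 blocks
 *   data offset          0x14 * block_size bytes into the range's data buffer
 *   hash table index     (0x1234 >> RANGE_SHIFT) & (hash_size - 1)
 *                        = 0x91 & (hash_size - 1)
 *
 * These expressions mirror the calculations made in Cache_GetBlock() below.
 */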
#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "ntfs_protos.h"
#define RANGE_SIZE (1 << RANGE_SHIFT)
#define RANGE_MASK (RANGE_SIZE - 1)
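
/* NODE2() maps a pointer to a BlockRange's second list node (node2, the one
 * used for the free and dirty lists) back to the containing BlockRange */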
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - (IPTR)&((struct BlockRange *)NULL)->node2) : NULL))
APTR
Cache_CreateCache(ULONG hash_size, ULONG block_count, ULONG block_size)

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));
    /* Allocate cache structure */
    if((c = AllocVec(sizeof(struct Cache), MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    c->block_size = block_size;
    c->block_count = block_count;
    c->hash_size = hash_size;
    /* Allocate hash table */
    c->hash_table = AllocVec(sizeof(struct MinList) * hash_size,
        MEMF_PUBLIC | MEMF_CLEAR);
    if(c->hash_table == NULL)
    /* Initialise each hash location's list, and free and dirty lists */
    for(i = 0; i < hash_size && success; i++)
        NewList((struct List *)&c->hash_table[i]);
    NewList((struct List *)&c->free_list);
    NewList((struct List *)&c->dirty_list);
    /* Allocate cache blocks and add them to the free list */
    c->blocks = AllocVec(sizeof(APTR) * block_count,
        MEMF_PUBLIC | MEMF_CLEAR);
    for(i = 0; i < block_count && success; i++)
        b = AllocVec(sizeof(struct BlockRange)
            + (c->block_size << RANGE_SHIFT), MEMF_PUBLIC);
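        /* The data buffer for this range lives immediately after the
         * BlockRange header within the same allocation */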
        b->data = (UBYTE *)b + sizeof(struct BlockRange);
        AddTail((struct List *)&c->free_list,
            (struct Node *)&b->node2);

    Cache_DestroyCache(c);
VOID
Cache_DestroyCache(APTR cache)

    struct Cache *c = cache;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));
    for(i = 0; i < c->block_count; i++)
        FreeVec(c->blocks[i]);

    FreeVec(c->hash_table);
APTR
Cache_GetBlock(APTR cache, UQUAD blockNum, UBYTE **data)

    struct Cache *c = cache;
    struct BlockRange *b = NULL, *b2;
    ULONG error = 0, data_offset;
    struct MinList *l =
        &c->hash_table[(blockNum >> RANGE_SHIFT) & (c->hash_size - 1)];
    D(bug("[NTFS]: %s(%d)\n", __PRETTY_FUNCTION__, blockNum));
    /* Change block number to the start block of a range and get byte offset
     * within the range */
    data_offset = (blockNum - (blockNum & ~RANGE_MASK)) * c->block_size;
    blockNum &= ~RANGE_MASK;
    /* Check existing valid blocks first */
        if(b2->num == blockNum)
    /* Block found, so increment its usage count and remove it from the
     * free list if it is there */
    if(b->use_count++ == 0)
        if(b->state != BS_DIRTY)
            Remove((struct Node *)&b->node2);
    /* Get a free buffer to read block from disk */
    n = (struct MinNode *)RemHead((struct List *)&c->free_list);
    if(n == NULL)
    {
        /* No free blocks, so flush dirty list to try and free up some
         * more blocks, then try again */
        Cache_Flush(c);
        n = (struct MinNode *)RemHead((struct List *)&c->free_list);
    }
    b = (struct BlockRange *)NODE2(n);
    /* Read the block from disk */
    if((error = AccessDisk(FALSE, blockNum, RANGE_SIZE,
        c->block_size, b->data)) == 0)
        /* Remove block from its old position in the hash */
        if(b->state == BS_VALID)
            Remove((struct Node *)b);
        /* Add it to the hash at the new location */
        AddHead((struct List *)l, (struct Node *)&b->node1);
        /* Read failed, so put the block back on the free list */
        AddHead((struct List *)&c->free_list,
            (struct Node *)&b->node2);
        error = ERROR_NO_FREE_STORE;
    /* Set data pointer and error, and return cache block handle */
    *data = b->data + data_offset;
VOID
Cache_FreeBlock(APTR cache, APTR block)

    struct Cache *c = cache;
    struct BlockRange *b = block;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));
    /* Decrement usage count */
    b->use_count--;

    /* Put an unused block at the end of the free list unless it's dirty */
    if(b->use_count == 0 && b->state != BS_DIRTY)
        AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);
VOID
Cache_MarkBlockDirty(APTR cache, APTR block)

    struct Cache *c = cache;
    struct BlockRange *b = block;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));
    if(b->state != BS_DIRTY)
    {
        b->state = BS_DIRTY;
        AddTail((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }
BOOL
Cache_Flush(APTR cache)

    struct Cache *c = cache;
    struct BlockRange *b;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));
    while((n = (struct MinNode *)RemHead((struct List *)&c->dirty_list))
        != NULL && error == 0)
        /* Write dirty block range to disk */
        error = AccessDisk(TRUE, b->num, RANGE_SIZE, c->block_size, b->data);
        /* Transfer block range to free list if unused, or put back on dirty
         * list upon an error */
        if(b->use_count == 0)
            AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);
            AddHead((struct List *)&c->dirty_list, (struct Node *)&b->node2);
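
/*
 * Minimal usage sketch of the cache API above (illustrative only: the
 * numeric parameters, the "sector" value and the function name below are
 * hypothetical, and error checking is elided).
 */
#if 0
static void Cache_UsageSketch(void)
{
    APTR cache, handle;
    UBYTE *data;
    UQUAD sector = 1234;    /* hypothetical disk block number */

    /* 64-entry hash table, 64 cache blocks, 512-byte disk blocks */
    cache = Cache_CreateCache(64, 64, 512);

    /* Look up (or read in) the range containing the requested block */
    handle = Cache_GetBlock(cache, sector, &data);

    /* ... modify the returned data buffer here ... */

    /* Record that the block was changed, then release the handle */
    Cache_MarkBlockDirty(cache, handle);
    Cache_FreeBlock(cache, handle);

    /* Write dirty ranges back to disk, then tear the cache down */
    Cache_Flush(cache);
    Cache_DestroyCache(cache);
}
#endif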