/*
 * Copyright © 2010, The AROS Development Team. All rights reserved.
 *
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number.
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking up
 * the hash table. Each position in the hash table holds a list containing
 * all cache blocks (henceforth referred to as a block: individual disk
 * blocks are not used internally) that map to that hash location. Each
 * list in the hash table is typically very short, so look-up time is
 * quick.
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * becomes empty.
 */
#include <stddef.h>

#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "fat_protos.h"
/* A range covers RANGE_SIZE contiguous disk blocks; RANGE_MASK extracts the
 * offset of a block within its range. RANGE_SHIFT is defined elsewhere. */
#define RANGE_SIZE (1 << RANGE_SHIFT)
#define RANGE_MASK (RANGE_SIZE - 1)

/* Recover the enclosing struct BlockRange from a pointer to its second list
 * node (node2), as stored on the free and dirty lists. Uses the standard
 * offsetof instead of the non-portable NULL-pointer arithmetic idiom.
 * NULL maps to NULL. */
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - offsetof(struct BlockRange, node2)) : NULL))
70 APTR
Cache_CreateCache(ULONG hash_size
, ULONG block_count
, ULONG block_size
)
77 /* Allocate cache structure */
79 if((c
= AllocVec(sizeof(struct Cache
), MEMF_PUBLIC
| MEMF_CLEAR
)) == NULL
)
84 c
->block_size
= block_size
;
85 c
->block_count
= block_count
;
86 c
->hash_size
= hash_size
;
88 /* Allocate hash table */
90 c
->hash_table
= AllocVec(sizeof(struct MinList
) * hash_size
,
91 MEMF_PUBLIC
| MEMF_CLEAR
);
92 if(c
->hash_table
== NULL
)
95 /* Initialise each hash location's list, and free and dirty lists */
97 for(i
= 0; i
< hash_size
&& success
; i
++)
98 NewList((struct List
*)&c
->hash_table
[i
]);
99 NewList((struct List
*)&c
->free_list
);
100 NewList((struct List
*)&c
->dirty_list
);
102 /* Allocate cache blocks and add them to the free list */
104 c
->blocks
= AllocVec(sizeof(APTR
) * block_count
,
105 MEMF_PUBLIC
| MEMF_CLEAR
);
109 for(i
= 0; i
< block_count
&& success
; i
++)
111 b
= AllocVec(sizeof(struct BlockRange
)
112 + (c
->block_size
<< RANGE_SHIFT
), MEMF_PUBLIC
);
116 b
->data
= (UBYTE
*)b
+ sizeof(struct BlockRange
);
124 AddTail((struct List
*)&c
->free_list
,
125 (struct Node
*)&b
->node2
);
131 Cache_DestroyCache(c
);
139 VOID
Cache_DestroyCache(APTR cache
)
141 struct Cache
*c
= cache
;
146 for(i
= 0; i
< c
->block_count
; i
++)
147 FreeVec(c
->blocks
[i
]);
149 FreeVec(c
->hash_table
);
154 APTR
Cache_GetBlock(APTR cache
, ULONG blockNum
, UBYTE
**data
)
156 struct Cache
*c
= cache
;
157 struct BlockRange
*b
= NULL
, *b2
;
158 ULONG error
= 0, data_offset
;
160 &c
->hash_table
[(blockNum
>> RANGE_SHIFT
) & (c
->hash_size
- 1)];
163 /* Change block number to the start block of a range and get byte offset
166 data_offset
= (blockNum
- (blockNum
& ~RANGE_MASK
)) * c
->block_size
;
167 blockNum
&= ~RANGE_MASK
;
169 /* Check existing valid blocks first */
173 if(b2
->num
== blockNum
)
179 /* Block found, so increment its usage count and remove it from the
182 if(b
->use_count
++ == 0)
184 if(b
->state
!= BS_DIRTY
)
185 Remove((struct Node
*)&b
->node2
);
190 /* Get a free buffer to read block from disk */
192 n
= (struct MinNode
*)RemHead((struct List
*)&c
->free_list
);
195 /* No free blocks, so flush dirty list to try and free up some
196 * more blocks, then try again */
199 n
= (struct MinNode
*)RemHead((struct List
*)&c
->free_list
);
204 b
= (struct BlockRange
*)NODE2(n
);
206 /* Read the block from disk */
208 if((error
= AccessDisk(FALSE
, blockNum
, RANGE_SIZE
,
209 c
->block_size
, b
->data
)) == 0)
211 /* Remove block from its old position in the hash */
213 if(b
->state
== BS_VALID
)
214 Remove((struct Node
*)b
);
216 /* Add it to the hash at the new location */
218 AddHead((struct List
*)l
, (struct Node
*)&b
->node1
);
225 /* Read failed, so put the block back on the free list */
228 AddHead((struct List
*)&c
->free_list
,
229 (struct Node
*)&b
->node2
);
234 error
= ERROR_NO_FREE_STORE
;
237 /* Set data pointer and error, and return cache block handle */
239 *data
= b
->data
+ data_offset
;
246 VOID
Cache_FreeBlock(APTR cache
, APTR block
)
248 struct Cache
*c
= cache
;
249 struct BlockRange
*b
= block
;
251 /* Decrement usage count */
255 /* Put an unused block at the end of the free list unless it's dirty */
257 if(b
->use_count
== 0 && b
->state
!= BS_DIRTY
)
258 AddTail((struct List
*)&c
->free_list
, (struct Node
*)&b
->node2
);
264 VOID
Cache_MarkBlockDirty(APTR cache
, APTR block
)
266 struct Cache
*c
= cache
;
267 struct BlockRange
*b
= block
;
269 if(b
->state
!= BS_DIRTY
)
272 AddTail((struct List
*)&c
->dirty_list
, (struct Node
*)&b
->node2
);
279 BOOL
Cache_Flush(APTR cache
)
281 struct Cache
*c
= cache
;
284 struct BlockRange
*b
;
286 while((n
= (struct MinNode
*)RemHead((struct List
*)&c
->dirty_list
))
287 != NULL
&& error
== 0)
289 /* Write dirty block range to disk */
292 error
= AccessDisk(TRUE
, b
->num
, RANGE_SIZE
, c
->block_size
, b
->data
);
294 /* Transfer block range to free list if unused, or put back on dirty
295 * list upon an error */
300 if(b
->use_count
== 0)
301 AddTail((struct List
*)&c
->free_list
, (struct Node
*)&b
->node2
);
304 AddHead((struct List
*)&c
->dirty_list
, (struct Node
*)&b
->node2
);