Fixed stupid bug in clearing memory.
[AROS.git] / workbench / fs / fat / cache.c
/*
    Copyright © 2010, The AROS Development Team. All rights reserved.
    $Id$

    Disk cache.
*/
/*
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number (a worked example follows
 * the RANGE_* definitions below).
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking up
 * the hash table. Each position in the hash table holds a list containing
 * all cache blocks (henceforth referred to as a block: individual disk
 * blocks are not used internally) that map to that hash location. Each
 * list in the hash table is typically very short, so look-up time is
 * quick.
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * becomes empty.
 *
 * A brief usage sketch of the client-facing interface follows this
 * comment.
 */
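/*
 * A brief usage sketch (illustration only, not code from the handler):
 * read one disk block through the cache, modify it, and let the cache
 * write it back later. The hash size (64), block count (64) and block
 * size (512) are example values, not ones taken from the FAT handler.
 *
 *     UBYTE *data;
 *     APTR block, cache = Cache_CreateCache(64, 64, 512);
 *
 *     if (cache != NULL)
 *     {
 *         block = Cache_GetBlock(cache, 1000, &data);
 *         if (block != NULL)
 *         {
 *             data[0] = 0xE5;                     // modify the cached copy
 *             Cache_MarkBlockDirty(cache, block); // queue range for writing
 *             Cache_FreeBlock(cache, block);      // range stays in the hash
 *         }
 *         Cache_Flush(cache);                     // force dirty data to disk
 *         Cache_DestroyCache(cache);
 *     }
 */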
#include <dos/dos.h>

#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "cache.h"
#include "fat_fs.h"
#include "fat_protos.h"
#define RANGE_SHIFT 5
#define RANGE_SIZE (1 << RANGE_SHIFT)
#define RANGE_MASK (RANGE_SIZE - 1)
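/*
 * Worked example of the range addressing described above (RANGE_SHIFT is
 * 5, so each range holds 32 disk blocks): for requested block 1000, the
 * range's base block is 1000 & ~RANGE_MASK = 992, the offset within the
 * range is 1000 - 992 = 8 blocks (8 * block_size bytes into the range's
 * buffer), and the hash index is (1000 >> RANGE_SHIFT) & (hash_size - 1).
 */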
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - (IPTR)&((struct BlockRange *)NULL)->node2) : NULL))
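/* The NODE2() macro above is the usual "container of" idiom: given a
 * pointer to a cache block's second list node (node2), it subtracts
 * node2's offset within struct BlockRange to recover a pointer to the
 * containing block, passing NULL through unchanged. It is needed when
 * blocks are taken off the free and dirty lists, which link through
 * node2. */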
APTR Cache_CreateCache(ULONG hash_size, ULONG block_count, ULONG block_size)
{
    struct Cache *c;
    ULONG i;
    BOOL success = TRUE;
    struct BlockRange *b;

    /* Allocate cache structure */
    if ((c = AllocVec(sizeof(struct Cache), MEMF_PUBLIC | MEMF_CLEAR))
        == NULL)
        success = FALSE;

    if (success)
    {
        c->block_size = block_size;
        c->block_count = block_count;
        c->hash_size = hash_size;

        /* Allocate hash table */
        c->hash_table = AllocVec(sizeof(struct MinList) * hash_size,
            MEMF_PUBLIC | MEMF_CLEAR);
        if (c->hash_table == NULL)
            success = FALSE;

        /* Initialise each hash location's list, and free and dirty lists */
        for (i = 0; i < hash_size && success; i++)
            NewList((struct List *)&c->hash_table[i]);
        NewList((struct List *)&c->free_list);
        NewList((struct List *)&c->dirty_list);

        /* Allocate cache blocks and add them to the free list */
        c->blocks = AllocVec(sizeof(APTR) * block_count,
            MEMF_PUBLIC | MEMF_CLEAR);
        if (c->blocks == NULL)
            success = FALSE;

        for (i = 0; i < block_count && success; i++)
        {
            b = AllocVec(sizeof(struct BlockRange)
                + (c->block_size << RANGE_SHIFT), MEMF_PUBLIC);

            if (b != NULL)
            {
                b->use_count = 0;
                b->state = BS_EMPTY;
                b->num = 0;
                b->data = (UBYTE *)b + sizeof(struct BlockRange);

                c->blocks[i] = b;
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
            }
            else
                success = FALSE;
        }
    }

    if (!success && c != NULL)
    {
        Cache_DestroyCache(c);
        c = NULL;
    }

    return c;
}
VOID Cache_DestroyCache(APTR cache)
{
    struct Cache *c = cache;
    ULONG i;

    Cache_Flush(c);

    if (c->blocks != NULL)
    {
        for (i = 0; i < c->block_count; i++)
            FreeVec(c->blocks[i]);
    }
    FreeVec(c->blocks);
    FreeVec(c->hash_table);
    FreeVec(c);
}
APTR Cache_GetBlock(APTR cache, ULONG blockNum, UBYTE **data)
{
    struct Cache *c = cache;
    struct BlockRange *b = NULL, *b2;
    ULONG error = 0, data_offset;
    struct MinList *l =
        &c->hash_table[(blockNum >> RANGE_SHIFT) & (c->hash_size - 1)];
    struct MinNode *n;

    /* Change block number to the start block of a range and get byte
     * offset within the range */
    data_offset = (blockNum - (blockNum & ~RANGE_MASK)) * c->block_size;
    blockNum &= ~RANGE_MASK;

    /* Check existing valid blocks first */
    ForeachNode(l, b2)
    {
        if (b2->num == blockNum)
            b = b2;
    }

    if (b != NULL)
    {
        /* Block found, so increment its usage count and remove it from
         * the free list */
        if (b->use_count++ == 0)
        {
            if (b->state != BS_DIRTY)
                Remove((struct Node *)&b->node2);
        }
    }
    else
    {
        /* Get a free buffer to read block from disk */
        n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        if (n == NULL)
        {
            /* No free blocks, so flush the dirty list to try to free up
             * some more blocks, then try again */
            Cache_Flush(c);
            n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        }

        if (n != NULL)
        {
            b = NODE2(n);

            /* Read the block from disk */
            if ((error = AccessDisk(FALSE, blockNum, RANGE_SIZE,
                c->block_size, b->data)) == 0)
            {
                /* Remove block from its old position in the hash */
                if (b->state == BS_VALID)
                    Remove((struct Node *)&b->node1);

                /* Add it to the hash at the new location */
                AddHead((struct List *)l, (struct Node *)&b->node1);
                b->num = blockNum;
                b->state = BS_VALID;
                b->use_count = 1;
            }
            else
            {
                /* Read failed, so put the block back on the free list */
                b->state = BS_EMPTY;
                AddHead((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
                b = NULL;
            }
        }
        else
            error = ERROR_NO_FREE_STORE;
    }

    /* Set data pointer and error, and return the cache block handle
     * (NULL on failure) */
    if (b != NULL)
        *data = b->data + data_offset;
    SetIoErr(error);

    return b;
}
VOID Cache_FreeBlock(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    /* Decrement usage count */
    b->use_count--;

    /* Put an unused block at the end of the free list unless it's dirty */
    if (b->use_count == 0 && b->state != BS_DIRTY)
        AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);

    return;
}
VOID Cache_MarkBlockDirty(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    if (b->state != BS_DIRTY)
    {
        b->state = BS_DIRTY;
        AddTail((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }

    return;
}
BOOL Cache_Flush(APTR cache)
{
    struct Cache *c = cache;
    ULONG error = 0;
    struct MinNode *n;
    struct BlockRange *b;

    while (error == 0 && (n = (struct MinNode *)
        RemHead((struct List *)&c->dirty_list)) != NULL)
    {
        /* Write dirty block range to disk */
        b = NODE2(n);
        error = AccessDisk(TRUE, b->num, RANGE_SIZE, c->block_size, b->data);

        /* Transfer the block range to the free list if unused, or put it
         * back on the dirty list upon an error */
        if (error == 0)
        {
            b->state = BS_VALID;
            if (b->use_count == 0)
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
        }
        else
            AddHead((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }

    SetIoErr(error);
    return error == 0;
}