[AROS.git] / rom / filesys / fat / cache.c
/*
    Copyright © 2010-2015, The AROS Development Team. All rights reserved.
    $Id$

    Disk cache.
*/
/*
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number.
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking up
 * the hash table. Each position in the hash table holds a list containing
 * all cache blocks (henceforth referred to as a block: individual disk
 * blocks are not used internally) that map to that hash location. Each
 * list in the hash table is typically very short, so look-up time is
 * quick.
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * becomes empty.
 */
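/*
 * Worked example (illustrative figures, not part of the original comments):
 * with the 32-block ranges defined below (RANGE_SHIFT of 5) and 512-byte
 * disk blocks, a request for block 1000 maps to the range based at block
 * 992 (1000 & ~RANGE_MASK). The data pointer returned to the client is
 * offset (1000 - 992) * 512 = 4096 bytes into that range's buffer, and the
 * range is hashed into slot (1000 >> RANGE_SHIFT) & (hash_size - 1), which
 * is why hash_size needs to be a power of two.
 */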
#include <exec/memory.h>    /* MEMF_PUBLIC, MEMF_CLEAR */
#include <dos/dos.h>

#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "cache.h"
#define SysBase (c->sys_base)
#define DOSBase (c->dos_base)

#define RANGE_SHIFT 5
#define RANGE_SIZE (1 << RANGE_SHIFT)
#define RANGE_MASK (RANGE_SIZE - 1)
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - (IPTR)&((struct BlockRange *)NULL)->node2) : NULL))
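/*
 * NODE2() is a container_of-style conversion: given a pointer to a cache
 * block's second list node (node2), it subtracts node2's offset within
 * struct BlockRange to recover the enclosing BlockRange. It is used to turn
 * nodes taken off the free and dirty lists back into cache blocks.
 */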
APTR Cache_CreateCache(APTR priv, ULONG hash_size, ULONG block_count,
    ULONG block_size, struct ExecBase *sys_base, struct DosLibrary *dos_base)
{
    struct Cache _c, *c = &_c;
    ULONG i;
    BOOL success = TRUE;
    struct BlockRange *b;

    /* Allocate cache structure. The temporary stack structure supplies
     * sys_base so that the SysBase macro is usable for this first
     * AllocVec() call */

    c->sys_base = sys_base;
    if((c = AllocVec(sizeof(struct Cache), MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        success = FALSE;

    if(success)
    {
        c->priv = priv;
        c->sys_base = sys_base;
        c->dos_base = dos_base;
        c->block_size = block_size;
        c->block_count = block_count;
        c->hash_size = hash_size;

        /* Allocate hash table */

        c->hash_table = AllocVec(sizeof(struct MinList) * hash_size,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->hash_table == NULL)
            success = FALSE;

        /* Initialise each hash location's list, and the free and dirty
         * lists */

        for(i = 0; i < hash_size && success; i++)
            NewList((struct List *)&c->hash_table[i]);
        NewList((struct List *)&c->free_list);
        NewList((struct List *)&c->dirty_list);

        /* Allocate cache blocks and add them to the free list */

        c->blocks = AllocVec(sizeof(APTR) * block_count,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->blocks == NULL)
            success = FALSE;

        for(i = 0; i < block_count && success; i++)
        {
            b = AllocVec(sizeof(struct BlockRange)
                + (c->block_size << RANGE_SHIFT), MEMF_PUBLIC);

            if(b != NULL)
            {
                b->use_count = 0;
                b->state = BS_EMPTY;
                b->num = 0;
                b->data = (UBYTE *)b + sizeof(struct BlockRange);
                c->blocks[i] = b;
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
            }
            else
                success = FALSE;
        }
    }

    if(!success && c != NULL)
    {
        Cache_DestroyCache(c);
        c = NULL;
    }

    return c;
}
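/*
 * Note: each cache block's buffer holds a whole range, so the cache's data
 * footprint is roughly block_count * (block_size << RANGE_SHIFT) bytes; for
 * example (illustrative figures only), 64 cache blocks of 512-byte disk
 * blocks occupy 64 * 16 KiB = 1 MiB.
 */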
VOID Cache_DestroyCache(APTR cache)
{
    struct Cache *c = cache;
    ULONG i;

    Cache_Flush(c);

    /* The block array may be absent if the cache was only partially
     * constructed */

    if(c->blocks != NULL)
        for(i = 0; i < c->block_count; i++)
            FreeVec(c->blocks[i]);
    FreeVec(c->blocks);
    FreeVec(c->hash_table);
    FreeVec(c);
}
APTR Cache_GetBlock(APTR cache, ULONG blockNum, UBYTE **data)
{
    struct Cache *c = cache;
    struct BlockRange *b = NULL, *b2;
    LONG error = 0, data_offset;
    struct MinList *l =
        &c->hash_table[(blockNum >> RANGE_SHIFT) & (c->hash_size - 1)];
    struct MinNode *n;

    /* Change block number to the start block of a range and get byte offset
     * within range */

    data_offset = (blockNum - (blockNum & ~RANGE_MASK)) * c->block_size;
    blockNum &= ~RANGE_MASK;

    /* Check existing valid blocks first */

    ForeachNode(l, b2)
    {
        if(b2->num == blockNum)
            b = b2;
    }

    if(b != NULL)
    {
        /* Block found, so increment its usage count and remove it from the
         * free list */

        if(b->use_count++ == 0)
        {
            if(b->state != BS_DIRTY)
                Remove((struct Node *)&b->node2);
        }
    }
    else
    {
        /* Get a free buffer to read block from disk */

        n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        if(n == NULL)
        {
            /* No free blocks, so flush dirty list to try and free up some
             * more blocks, then try again */

            Cache_Flush(c);
            n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        }

        if(n != NULL)
        {
            b = (struct BlockRange *)NODE2(n);

            /* Read the block from disk */

            if(AccessDisk(FALSE, blockNum, RANGE_SIZE, c->block_size,
                b->data, c->priv) == 0)
            {
                /* Remove block from its old position in the hash */

                if(b->state == BS_VALID)
                    Remove((struct Node *)b);

                /* Add it to the hash at the new location */

                AddHead((struct List *)l, (struct Node *)&b->node1);
                b->num = blockNum;
                b->state = BS_VALID;
                b->use_count = 1;
            }
            else
            {
                /* Read failed, so put the block back on the free list */

                b->state = BS_EMPTY;
                AddHead((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
                b = NULL;
                error = ERROR_UNKNOWN;
            }
        }
        else
            error = ERROR_NO_FREE_STORE;
    }

    /* Set data pointer and error, and return cache block handle */

    *data = b ? (b->data + data_offset) : NULL;
    SetIoErr(error);

    return b;
}
VOID Cache_FreeBlock(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    /* Decrement usage count */

    b->use_count--;

    /* Put an unused block at the end of the free list unless it's dirty */

    if(b->use_count == 0 && b->state != BS_DIRTY)
        AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);

    return;
}
VOID Cache_MarkBlockDirty(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    if(b->state != BS_DIRTY)
    {
        b->state = BS_DIRTY;
        AddTail((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }

    return;
}
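/*
 * Typical call sequence for the routines above (an illustrative sketch only,
 * not code taken from fat.handler's callers):
 *
 *     UBYTE *data;
 *     APTR b = Cache_GetBlock(cache, block_num, &data);
 *
 *     if (b != NULL)
 *     {
 *         ... read or modify block_size bytes at 'data' ...
 *         Cache_MarkBlockDirty(cache, b);    (only if 'data' was modified)
 *         Cache_FreeBlock(cache, b);
 *     }
 *     else
 *         error = IoErr();
 */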
BOOL Cache_Flush(APTR cache)
{
    struct Cache *c = cache;
    LONG error = 0, td_error;
    struct MinNode *n;
    struct BlockRange *b;

    /* Test the error flag before removing another node, so that a range put
     * back after a failed write is not pulled off the dirty list again */

    while(error == 0 && (n = (struct MinNode *)
        RemHead((struct List *)&c->dirty_list)) != NULL)
    {
        /* Write dirty block range to disk */

        b = NODE2(n);
        td_error = AccessDisk(TRUE, b->num, RANGE_SIZE, c->block_size,
            b->data, c->priv);

        /* Transfer block range to free list if unused, or put back on dirty
         * list upon an error */

        if(td_error == 0)
        {
            b->state = BS_VALID;
            if(b->use_count == 0)
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
        }
        else
        {
            AddHead((struct List *)&c->dirty_list, (struct Node *)&b->node2);
            error = ERROR_UNKNOWN;
        }
    }

    SetIoErr(error);
    return error == 0;
}
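/*
 * Note: Cache_Flush() stops at the first write failure, leaving the failed
 * range at the head of the dirty list (and any later dirty ranges still
 * queued) and returning FALSE with IoErr() set, so a subsequent flush will
 * retry them.
 */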