workbench/fs/ntfs/cache.c
/*
    Copyright © 2010, The AROS Development Team. All rights reserved.
    $Id$

    Disk cache.
*/
/*
 * This is an LRU copyback cache.
 *
 * The cache consists of a fixed number of cache blocks, each of which can
 * hold a contiguous range of disk blocks (each range is of a fixed
 * length). Each range is addressed by its base block number, calculated by
 * applying a mask to the requested block number.
 *
 * Each cache block contains two Exec list nodes, so it can be part of two
 * lists simultaneously. The first node links a block into a hash table
 * list, while the second links it into either the free list or the dirty
 * list. Initially, all cache blocks are present in the free list, and are
 * absent from the hash table.
 *
 * When a disk block is requested by the client, the range that contains
 * that disk block is calculated. The range is then sought by looking up
 * the hash table. Each position in the hash table holds a list containing
 * all cache blocks (henceforth referred to as a block: individual disk
 * blocks are not used internally) that map to that hash location. Each
 * list in the hash table is typically very short, so look-up time is
 * quick.
 *
 * If the requested range is not in the hash table, a cache block is
 * removed from the head of the free list and from any hash table list it
 * is in, and used to read the appropriate range from disk. This block is
 * then added to the hash table at its new location.
 *
 * Two elements are returned to the client when a requested block is found:
 * an opaque cache block handle, and a pointer to the data buffer.
 *
 * When a range is freed, it remains in the hash table so that it can be
 * found quickly if needed again. Unless dirty, it is also added to the
 * tail of the free list through its second list node. The block then
 * remains in the hash table until reused for a different range.
 *
 * If a disk block is marked dirty by the client, the entire containing
 * range is marked dirty and added to the tail of the dirty list through
 * the cache block's second node. The dirty list is flushed periodically
 * (currently once per second), and additionally whenever the free list
 * becomes empty.
 */
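/*
 * The structures handled here are declared in cache.h. A minimal sketch of
 * what they presumably look like, inferred from the fields used below (the
 * field names come from this file, but exact types and ordering are
 * assumptions):
 *
 *     struct BlockRange {
 *         struct MinNode  node1;      // hash table list linkage
 *         struct MinNode  node2;      // free list or dirty list linkage
 *         UQUAD           num;        // base block number of the range
 *         LONG            state;      // BS_EMPTY, BS_VALID or BS_DIRTY
 *         ULONG           use_count;  // references held by clients
 *         UBYTE          *data;       // buffer for RANGE_SIZE disk blocks
 *     };
 *
 *     struct Cache {
 *         ULONG               block_size;   // bytes per disk block
 *         ULONG               block_count;  // number of cache blocks
 *         ULONG               hash_size;    // number of hash table slots
 *         struct MinList     *hash_table;   // one list head per slot
 *         struct MinList      free_list;    // blocks available for reuse
 *         struct MinList      dirty_list;   // blocks awaiting write-back
 *         struct BlockRange **blocks;       // all blocks, kept for teardown
 *     };
 */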
#include <dos/dos.h>

#include <proto/exec.h>
#include <proto/dos.h>
#include <clib/alib_protos.h>

#include "cache.h"
#include "ntfs_fs.h"
#include "ntfs_protos.h"

#include "debug.h"
#define RANGE_SHIFT 5
#define RANGE_SIZE  (1 << RANGE_SHIFT)
#define RANGE_MASK  (RANGE_SIZE - 1)
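/*
 * With RANGE_SHIFT of 5, each cached range covers 32 consecutive disk
 * blocks. For example, a request for block 37 falls into the range whose
 * base is 37 & ~RANGE_MASK == 32, at block offset 37 - 32 == 5 within that
 * range's data buffer.
 */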
#define NODE2(A) \
    ((struct BlockRange *)(((A) != NULL) ? \
    (((BYTE *)(A)) - (IPTR)&((struct BlockRange *)NULL)->node2) : NULL))
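/*
 * NODE2() recovers the enclosing BlockRange from a pointer to its second
 * list node (the free/dirty list linkage) by subtracting node2's offset
 * within the structure: the same idea as the usual "container_of" macro.
 */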
APTR Cache_CreateCache(ULONG hash_size, ULONG block_count, ULONG block_size)
{
    struct Cache *c;
    ULONG i;
    BOOL success = TRUE;
    struct BlockRange *b;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));

    /* Allocate cache structure */

    if((c = AllocVec(sizeof(struct Cache), MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        success = FALSE;

    if(success)
    {
        c->block_size = block_size;
        c->block_count = block_count;
        c->hash_size = hash_size;

        /* Allocate hash table */

        c->hash_table = AllocVec(sizeof(struct MinList) * hash_size,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->hash_table == NULL)
            success = FALSE;

        /* Initialise each hash location's list, and the free and dirty lists */

        for(i = 0; i < hash_size && success; i++)
            NewList((struct List *)&c->hash_table[i]);
        NewList((struct List *)&c->free_list);
        NewList((struct List *)&c->dirty_list);

        /* Allocate cache blocks and add them to the free list */

        c->blocks = AllocVec(sizeof(APTR) * block_count,
            MEMF_PUBLIC | MEMF_CLEAR);
        if(c->blocks == NULL)
            success = FALSE;

        for(i = 0; i < block_count && success; i++)
        {
            b = AllocVec(sizeof(struct BlockRange)
                + (c->block_size << RANGE_SHIFT), MEMF_PUBLIC);

            if(b != NULL)
            {
                b->use_count = 0;
                b->state = BS_EMPTY;
                b->num = 0;
                b->data = (UBYTE *)b + sizeof(struct BlockRange);

                c->blocks[i] = b;
                AddTail((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
            }
            else
                success = FALSE;
        }
    }

    if(!success && c != NULL)
    {
        Cache_DestroyCache(c);
        c = NULL;
    }

    return c;
}
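/*
 * Note for callers (an observation from the code rather than a documented
 * contract): Cache_GetBlock() indexes the hash table with
 * (blockNum >> RANGE_SHIFT) & (hash_size - 1), so hash_size is effectively
 * expected to be a power of two.
 */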
VOID Cache_DestroyCache(APTR cache)
{
    struct Cache *c = cache;
    ULONG i;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));

    Cache_Flush(c);

    if(c->blocks != NULL)
    {
        for(i = 0; i < c->block_count; i++)
            FreeVec(c->blocks[i]);
    }
    FreeVec(c->blocks);
    FreeVec(c->hash_table);
    FreeVec(c);
}
APTR Cache_GetBlock(APTR cache, UQUAD blockNum, UBYTE **data)
{
    struct Cache *c = cache;
    struct BlockRange *b = NULL, *b2;
    ULONG error = 0, data_offset;
    struct MinList *l =
        &c->hash_table[(blockNum >> RANGE_SHIFT) & (c->hash_size - 1)];
    struct MinNode *n;

    D(bug("[NTFS]: %s(%d)\n", __PRETTY_FUNCTION__, blockNum));

    /* Change block number to the start block of a range and get byte offset
     * within range */

    data_offset = (blockNum - (blockNum & ~RANGE_MASK)) * c->block_size;
    blockNum &= ~RANGE_MASK;

    /* Check existing valid blocks first */

    ForeachNode(l, b2)
    {
        if(b2->num == blockNum)
            b = b2;
    }

    if(b != NULL)
    {
        /* Block found, so increment its usage count and remove it from the
         * free list */

        if(b->use_count++ == 0)
        {
            if(b->state != BS_DIRTY)
                Remove((struct Node *)&b->node2);
        }
    }
    else
    {
        /* Get a free buffer to read block from disk */

        n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        if(n == NULL)
        {
            /* No free blocks, so flush dirty list to try and free up some
             * more blocks, then try again */

            Cache_Flush(c);
            n = (struct MinNode *)RemHead((struct List *)&c->free_list);
        }

        if(n != NULL)
        {
            b = (struct BlockRange *)NODE2(n);

            /* Read the block from disk */

            if((error = AccessDisk(FALSE, blockNum, RANGE_SIZE,
                c->block_size, b->data)) == 0)
            {
                /* Remove block from its old position in the hash */

                if(b->state == BS_VALID)
                    Remove((struct Node *)b);

                /* Add it to the hash at the new location */

                AddHead((struct List *)l, (struct Node *)&b->node1);
                b->num = blockNum;
                b->state = BS_VALID;
                b->use_count = 1;
            }
            else
            {
                /* Read failed, so put the block back on the free list */

                b->state = BS_EMPTY;
                AddHead((struct List *)&c->free_list,
                    (struct Node *)&b->node2);
                b = NULL;
            }
        }
        else
            error = ERROR_NO_FREE_STORE;
    }

    /* Set data pointer and error, and return cache block handle (b is NULL
     * if the range could not be obtained) */

    if(b != NULL)
        *data = b->data + data_offset;
    SetIoErr(error);

    return b;
}
VOID Cache_FreeBlock(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));

    /* Decrement usage count */

    b->use_count--;

    /* Put an unused block at the end of the free list unless it's dirty */

    if(b->use_count == 0 && b->state != BS_DIRTY)
        AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);

    return;
}
VOID Cache_MarkBlockDirty(APTR cache, APTR block)
{
    struct Cache *c = cache;
    struct BlockRange *b = block;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));

    if(b->state != BS_DIRTY)
    {
        b->state = BS_DIRTY;
        AddTail((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }

    return;
}
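/*
 * Typical client sequence for updating a cached disk block (a sketch only;
 * the helper modify_data() and the block number are made up for
 * illustration and do not appear in the handler):
 *
 *     UBYTE *data;
 *     APTR block = Cache_GetBlock(cache, 1234, &data);
 *     if (block != NULL)
 *     {
 *         modify_data(data);                   // change the cached bytes
 *         Cache_MarkBlockDirty(cache, block);  // queue range for write-back
 *         Cache_FreeBlock(cache, block);       // drop this reference
 *     }
 */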
BOOL Cache_Flush(APTR cache)
{
    struct Cache *c = cache;
    ULONG error = 0;
    struct MinNode *n;
    struct BlockRange *b;

    D(bug("[NTFS]: %s()\n", __PRETTY_FUNCTION__));

    while(error == 0 && (n = (struct MinNode *)
        RemHead((struct List *)&c->dirty_list)) != NULL)
    {
        /* Write dirty block range to disk */

        b = NODE2(n);
        error = AccessDisk(TRUE, b->num, RANGE_SIZE, c->block_size, b->data);

        /* Transfer block range to free list if unused, or put back on dirty
         * list upon an error */

        if(error == 0)
        {
            b->state = BS_VALID;
            if(b->use_count == 0)
                AddTail((struct List *)&c->free_list, (struct Node *)&b->node2);
        }
        else
            AddHead((struct List *)&c->dirty_list, (struct Node *)&b->node2);
    }

    SetIoErr(error);
    return error == 0;
}