sbin/hammer: Have consistent naming for buffer variables
[dragonfly.git] sbin/hammer/blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
 */

#include "hammer_util.h"
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 * We are bootstrapping the freemap itself and cannot update it yet.
 */
hammer_off_t
bootstrap_bigblock(struct volume_info *volume)
{
        hammer_off_t result_offset;

        assert_volume_offset(volume);
        result_offset = volume->vol_free_off;

        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        return(result_offset);
}
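/*
 * Example (a hypothetical sketch, not part of the original file): during
 * bootstrap each call simply bumps vol_free_off by one big-block, so two
 * consecutive calls return offsets exactly one HAMMER_BIGBLOCK_SIZE
 * (8MB) apart:
 *
 *	hammer_off_t b1 = bootstrap_bigblock(vol);
 *	hammer_off_t b2 = bootstrap_bigblock(vol);
 *	assert(b2 - b1 == HAMMER_BIGBLOCK_SIZE);
 */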
/*
 * Allocate a big-block in zone-3 for the UNDO/REDO FIFO.
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;

        /* Only the root volume needs formatting */
        assert(volume->vol_no == HAMMER_ROOT_VOLNO);

        result_offset = bootstrap_bigblock(volume);
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        --layer1->blocks_free;
        hammer_crc_set_layer1(layer1);
        buffer1->cache.modified = 1;
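        /*
         * Added commentary: the freemap is a two-layer radix tree.
         * HAMMER_BLOCKMAP_LAYER1_OFFSET() selects the layer-1 entry whose
         * layer-2 array covers result_offset; with 8MB big-blocks each
         * layer-1 entry is understood to cover 4TB of address space and
         * each layer-2 entry exactly one 8MB big-block.
         */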
        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
        assert(layer2->zone == 0);
        layer2->zone = HAMMER_ZONE_UNDO_INDEX;
        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
        layer2->bytes_free = 0;
        hammer_crc_set_layer2(layer2);
        buffer2->cache.modified = 1;

        --volume->ondisk->vol0_stat_freebigblocks;

        rel_buffer(buffer1);
        rel_buffer(buffer2);

        return(result_offset);
}
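/*
 * Example (a hypothetical sketch): formatting code in the style of
 * newfs_hammer's UNDO setup might call this in a loop and record each
 * big-block in the volume header.  The vol0_undo_array usage below is
 * illustrative only, not taken from this file:
 *
 *	for (n = 0; n < undo_bigblocks; ++n)
 *		root_vol->ondisk->vol0_undo_array[n] =
 *			alloc_undo_bigblock(root_vol);
 */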
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t tmp_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t block_offset;
        void *ptr;

        volume = get_root_volume();

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = HAMMER_DATA_DOALIGN(bytes);
        assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);	/* not HAMMER_XBUFSIZE */
        assert(hammer_is_index_record(zone));
again:
        assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

        tmp_offset = blockmap->next_offset + bytes - 1;
        if ((blockmap->next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64)
                blockmap->next_offset = tmp_offset & ~HAMMER_BUFMASK64;

        block_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
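        /*
         * Worked example (added commentary): with a 16KB HAMMER_BUFSIZE,
         * next_offset ending in 0x3fc0 and bytes == 128 give tmp_offset
         * ending in 0x403f.  The XOR then has bits set above
         * HAMMER_BUFMASK64, meaning the allocation would straddle two
         * buffers, so next_offset is first rounded up to 0x4000.
         */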
        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        assert(!(block_offset == 0 && layer1->blocks_free == 0));

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
                errx(1, "alloc_blockmap: layer2 ran out of space!");
        /*
         * If we are entering a new big-block, assign ownership to our
         * zone.  If the big-block is owned by another zone, skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                hammer_crc_set_layer1(layer1);
                layer2->zone = zone;
                --volume->ondisk->vol0_stat_freebigblocks;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset =
                        HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
                goto again;
        }
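        /*
         * Added commentary: HAMMER_ZONE_LAYER2_NEXT_OFFSET() is understood
         * to advance next_offset to the start of the next big-block, so a
         * big-block owned by a foreign zone is skipped whole before the
         * allocation is retried.
         */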
        assert(layer2->append_off == block_offset);
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
        hammer_crc_set_layer2(layer2);

        ptr = get_buffer_data(*result_offp, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        return(ptr);
}
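/*
 * Example (a hypothetical sketch of a caller): formatting code could
 * allocate and zero a B-Tree node ondisk like this.  The variable names
 * are illustrative only:
 *
 *	struct buffer_info *buffer = NULL;
 *	hammer_off_t node_off;
 *	hammer_node_ondisk_t node;
 *
 *	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
 *			      &node_off, &buffer);
 *	bzero(node, sizeof(*node));
 *	rel_buffer(buffer);
 */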
hammer_off_t
blockmap_lookup(hammer_off_t zone_offset, int *errorp)
{
        return(blockmap_lookup_save(zone_offset, NULL, NULL, errorp));
}
hammer_off_t
blockmap_lookup_save(hammer_off_t zone_offset,
                hammer_blockmap_layer1_t save_layer1,
                hammer_blockmap_layer2_t save_layer2,
                int *errorp)
{
        struct volume_info *root_volume = NULL;
        hammer_volume_ondisk_t ondisk;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset = HAMMER_OFF_BAD;
        int zone;
        int error = 0;

        if (save_layer1)
                bzero(save_layer1, sizeof(*save_layer1));
        if (save_layer2)
                bzero(save_layer2, sizeof(*save_layer2));

        zone = HAMMER_ZONE_DECODE(zone_offset);

        if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX) {
                error = -1;
                goto done;
        }
        if (zone >= HAMMER_MAX_ZONES) {
                error = -2;
                goto done;
        }
        root_volume = get_root_volume();
        ondisk = root_volume->ondisk;
        blockmap = &ondisk->vol0_blockmap[zone];

        /*
         * Handle blockmap offset translations.
         */
        if (hammer_is_index_record(zone)) {
                result_offset = hammer_xlate_to_zone2(zone_offset);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                if (zone_offset >= blockmap->alloc_offset) {
                        error = -3;
                        goto done;
                }
                result_offset = hammer_xlate_to_undo(ondisk, zone_offset);
        } else {
                /* assert(zone == HAMMER_ZONE_RAW_BUFFER_INDEX); */
                result_offset = zone_offset;
        }

        /*
         * The blockmap should match the requested zone (else the volume
         * header is mashed).
         */
        if (hammer_is_index_record(zone) &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
                error = -4;
                goto done;
        }

        /*
         * Validate that the big-block is assigned to the zone.  Also
         * assign save_layer{1,2} if not NULL.
         */
        freemap = &ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1 == NULL) {
                error = -5;
                goto done;
        }
        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                error = -6;
                goto done;
        }
        if (save_layer1)
                *save_layer1 = *layer1;
        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2 == NULL) {
                error = -7;
                goto done;
        }
        if (layer2->zone != zone) {
                error = -8;
                goto done;
        }
        if (save_layer2)
                *save_layer2 = *layer2;
done:
        rel_buffer(buffer1);
        rel_buffer(buffer2);

        if (errorp)
                *errorp = error;

        return(result_offset);
}
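/*
 * Example (a hypothetical sketch): a caller translating a zoned offset
 * to its physical offset, treating any of the negative error codes
 * (-1 through -8 above) as fatal:
 *
 *	int error;
 *	hammer_off_t phys_offset;
 *
 *	phys_offset = blockmap_lookup(zone_offset, &error);
 *	if (error)
 *		errx(1, "blockmap_lookup: error %d", error);
 */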