sbin/hammer: Split alloc_bigblock() into two functions [2/2]
sbin/hammer/blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
 */

#include "hammer_util.h"
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 * We are bootstrapping the freemap itself and cannot update it yet.
 */
hammer_off_t
bootstrap_bigblock(struct volume_info *volume)
{
        hammer_off_t result_offset;

        assert_volume_offset(volume);
        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                errx(1, "bootstrap_bigblock: Ran out of room, filesystem too small");

        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        return(result_offset);
}
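
/*
 * A worked example, assuming the standard 8MB HAMMER_BIGBLOCK_SIZE:
 * with vol_free_off at zone-2 offset 0x30800000, two consecutive calls
 * return 0x30800000 and then 0x31000000, leaving vol_free_off at
 * 0x31800000.  The freemap itself is deliberately not touched here;
 * that is the point of bootstrapping.
 */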
/*
 * Allocate a big-block for zone-3 for UNDO/REDO FIFO.
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;

        /* Only root volume needs formatting */
        assert(volume->vol_no == HAMMER_ROOT_VOLNO);

        result_offset = bootstrap_bigblock(volume);
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        --layer1->blocks_free;
        hammer_crc_set_layer1(layer1);
        buffer1->cache.modified = 1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
        assert(layer2->zone == 0);

        layer2->zone = HAMMER_ZONE_UNDO_INDEX;
        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
        layer2->bytes_free = 0;
        hammer_crc_set_layer2(layer2);
        buffer2->cache.modified = 1;

        --volume->ondisk->vol0_stat_freebigblocks;

        rel_buffer(buffer1);
        rel_buffer(buffer2);

        return(result_offset);
}
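
/*
 * Note on freemap geometry: the freemap is a two-layer radix tree.
 * Each layer2 entry describes one 8MB big-block, and each layer1 entry
 * points at a big-block's worth of layer2 entries, so a single layer1
 * entry covers roughly 4TB of address space (assuming the usual
 * 32-byte layer1 and 16-byte layer2 entry sizes from hammer_disk.h).
 * The HAMMER_BLOCKMAP_LAYER{1,2}_OFFSET() macros extract the byte
 * offset of the entry covering a given zone offset.
 */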
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t chunk_offset;
        void *ptr;

        volume = get_root_volume();

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = HAMMER_DATA_DOALIGN(bytes);
        assert(bytes > 0 && bytes <= HAMMER_BUFSIZE); /* not HAMMER_XBUFSIZE */
        assert(hammer_is_zone2_mapped_index(zone));

again:
        assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));
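
        /*
         * The XOR test below is nonzero outside HAMMER_BUFMASK64 exactly
         * when next_offset and the last byte of the proposed allocation
         * fall in different 16KB filesystem buffers (HAMMER_BUFSIZE).
         * Rounding (next_offset + bytes - 1) down to a buffer boundary
         * then places the allocation at the start of the next buffer,
         * where it fits entirely because bytes <= HAMMER_BUFSIZE.
         */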
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                blockmap->next_offset = (blockmap->next_offset + bytes - 1) &
                                        ~HAMMER_BUFMASK64;
        }
        chunk_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(!(chunk_offset == 0 && layer1->blocks_free == 0));

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new big-block assign ownership to our
         * zone.  If the big-block is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                hammer_crc_set_layer1(layer1);
                layer2->zone = zone;
                --volume->ondisk->vol0_stat_freebigblocks;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
                                        ~HAMMER_BIGBLOCK_MASK64;
                goto again;
        }

        assert(layer2->append_off == chunk_offset);
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
        hammer_crc_set_layer2(layer2);
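
        /*
         * append_off now tracks the next free byte within this big-block;
         * the assert above relies on it to check that successive
         * allocations out of the same big-block are contiguous.
         */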
        ptr = get_buffer_data(*result_offp, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        return(ptr);
}
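
/*
 * A hypothetical caller (names are illustrative, not from this file):
 *
 *      hammer_off_t node_offset;
 *      struct buffer_info *buffer = NULL;
 *      hammer_node_ondisk_t node;
 *
 *      node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
 *                            &node_offset, &buffer);
 *
 * The returned pointer aliases the buffer cache, the zone-encoded
 * address of the allocation comes back through result_offp, and the
 * caller is expected to rel_buffer() the buffer when done.
 */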

/*
 * Translate a zone-X offset into a zone-2 (raw buffer) offset and
 * validate it against the freemap.  The matching layer1/layer2 entries
 * are copied out through save_layer1/save_layer2 when those pointers
 * are non-NULL.  On failure a negative code is stored in *errorp and
 * the result is HAMMER_OFF_BAD or an unvalidated offset.
 */
hammer_off_t
blockmap_lookup(hammer_off_t zone_offset,
                hammer_blockmap_layer1_t save_layer1,
                hammer_blockmap_layer2_t save_layer2,
                int *errorp)
{
        struct volume_info *root_volume = NULL;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;
        int i;
        int error = 0;

        if (save_layer1)
                bzero(save_layer1, sizeof(*save_layer1));
        if (save_layer2)
                bzero(save_layer2, sizeof(*save_layer2));

        zone = HAMMER_ZONE_DECODE(zone_offset);

        if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX)
                error = -1;
        if (zone >= HAMMER_MAX_ZONES)
                error = -2;
        if (error) {
                result_offset = HAMMER_OFF_BAD;
                goto done;
        }

        root_volume = get_root_volume();
        blockmap = &root_volume->ondisk->vol0_blockmap[zone];

        if (zone == HAMMER_ZONE_RAW_BUFFER_INDEX) {
                result_offset = zone_offset;
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                i = HAMMER_OFF_SHORT_ENCODE(zone_offset) / HAMMER_BIGBLOCK_SIZE;
                if (zone_offset >= blockmap->alloc_offset) {
                        error = -3;
                        result_offset = HAMMER_OFF_BAD;
                        goto done;
                }
                result_offset = root_volume->ondisk->vol0_undo_array[i] +
                                (zone_offset & HAMMER_BIGBLOCK_MASK64);
        } else {
                result_offset = hammer_xlate_to_zone2(zone_offset);
        }

        /*
         * The blockmap should match the requested zone (else the volume
         * header is mashed).
         */
        if (HAMMER_ZONE_FREEMAP_INDEX != zone &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
                error = -4;
                goto done;
        }

        /*
         * Validate that the big-block is assigned to the zone.  Also
         * assign save_layer{1,2}.
         */
        freemap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        if (layer1 == NULL) {
                error = -5;
                goto done;
        }
        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                error = -6;
                goto done;
        }
        if (save_layer1)
                *save_layer1 = *layer1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
        if (layer2 == NULL) {
                error = -7;
                goto done;
        }
        if (layer2->zone != zone) {
                error = -8;
                goto done;
        }
        if (save_layer2)
                *save_layer2 = *layer2;

done:
        rel_buffer(buffer1);
        rel_buffer(buffer2);

        if (errorp)
                *errorp = error;

        return(result_offset);
}
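
/*
 * A hypothetical lookup (names are illustrative, not from this file):
 *
 *      int error;
 *      hammer_off_t phys;
 *
 *      phys = blockmap_lookup(data_offset, NULL, NULL, &error);
 *      if (error)
 *              errx(1, "blockmap_lookup: error %d", error);
 *
 * The negative codes identify the failing check: -1/-2 bad zone,
 * -3 UNDO offset past alloc_offset, -4 volume header/zone mismatch,
 * -5/-6 layer1 lookup failure, -7/-8 layer2 lookup failure.
 */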