/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
 */

#include "hammer_util.h"
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 * We are bootstrapping the freemap itself and cannot update it yet.
 */
hammer_off_t
bootstrap_bigblock(struct volume_info *volume)
{
        hammer_off_t result_offset;

        assert_volume_offset(volume);
        result_offset = volume->vol_free_off;

        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        return(result_offset);
}
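
#if 0
/*
 * Illustrative sketch, not part of the original file: bootstrap_bigblock()
 * is a pure pointer-bump allocator on vol_free_off, so back-to-back calls
 * return offsets exactly HAMMER_BIGBLOCK_SIZE apart.  The caller below is
 * hypothetical and only demonstrates that invariant.
 */
static void
example_bootstrap_pair(struct volume_info *volume)
{
        hammer_off_t first = bootstrap_bigblock(volume);
        hammer_off_t second = bootstrap_bigblock(volume);

        assert(second == first + HAMMER_BIGBLOCK_SIZE);
}
#endif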
/*
 * Allocate a big-block for zone-3 for UNDO/REDO FIFO.
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;

        /* Only root volume needs formatting */
        assert(volume->vol_no == HAMMER_ROOT_VOLNO);

        result_offset = bootstrap_bigblock(volume);
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        --layer1->blocks_free;
        hammer_crc_set_layer1(layer1);
        buffer1->cache.modified = 1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
        assert(layer2->zone == 0);

        layer2->zone = HAMMER_ZONE_UNDO_INDEX;
        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
        layer2->bytes_free = 0;
        hammer_crc_set_layer2(layer2);
        buffer2->cache.modified = 1;

        --volume->ondisk->vol0_stat_freebigblocks;

        rel_buffer(buffer1);
        rel_buffer(buffer2);

        return(result_offset);
}
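
#if 0
/*
 * Illustrative sketch, not part of the original file: the two-step freemap
 * dive used above and in alloc_blockmap().  Layer 1 is indexed within the
 * freemap, layer 2 within the big-block the layer 1 entry points at; each
 * layer 2 entry describes exactly one big-block.  The helper name is
 * hypothetical; the macros and get_buffer_data() are used exactly as in
 * the functions above.
 */
static hammer_off_t
example_layer2_offset(hammer_blockmap_t freemap, hammer_off_t offset,
                      struct buffer_info **bufferp)
{
        hammer_blockmap_layer1_t layer1;
        hammer_off_t layer1_offset;

        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(offset);
        layer1 = get_buffer_data(layer1_offset, bufferp, 0);

        return(layer1->phys_offset + HAMMER_BLOCKMAP_LAYER2_OFFSET(offset));
}
#endif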
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t tmp_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t block_offset;
        void *ptr;

        volume = get_root_volume();

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = HAMMER_DATA_DOALIGN(bytes);
        assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
        assert(hammer_is_zone2_mapped_index(zone));

again:
        assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

        tmp_offset = blockmap->next_offset + bytes - 1;
        if ((blockmap->next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                blockmap->next_offset = tmp_offset & ~HAMMER_BUFMASK64;
        }
        block_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
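        /*
         * Worked example (added annotation, not in the original file):
         * with 16KB HAMMER buffers, a next_offset ending in 0x3f00 and
         * bytes = 0x200 give a tmp_offset ending in 0x40ff; the XOR
         * exposes a flipped bit above HAMMER_BUFMASK64, so next_offset
         * is rounded up to the next buffer at 0x4000 before the
         * allocation proceeds.
         */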

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        assert(!(block_offset == 0 && layer1->blocks_free == 0));

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: layer2 ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new big-block assign ownership to our
         * zone.  If the big-block is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                hammer_crc_set_layer1(layer1);
                layer2->zone = zone;
                --volume->ondisk->vol0_stat_freebigblocks;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset =
                        HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
                goto again;
        }

        assert(layer2->append_off == block_offset);
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;

        layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
        hammer_crc_set_layer2(layer2);

        ptr = get_buffer_data(*result_offp, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        return(ptr);
}
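
#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller obtaining a zone-ed chunk.  The zone constant and size are only
 * examples, assuming HAMMER_ZONE_BTREE_INDEX and struct hammer_node_ondisk
 * from hammer_disk.h.
 */
static void *
example_alloc_btree_node(hammer_off_t *offp, struct buffer_info **bufferp)
{
        return(alloc_blockmap(HAMMER_ZONE_BTREE_INDEX,
                              sizeof(struct hammer_node_ondisk),
                              offp, bufferp));
}
#endif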
hammer_off_t
blockmap_lookup(hammer_off_t zone_offset,
                hammer_blockmap_layer1_t save_layer1,
                hammer_blockmap_layer2_t save_layer2,
                int *errorp)
{
        struct volume_info *root_volume = NULL;
        hammer_volume_ondisk_t ondisk;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;
        int error = 0;

        if (save_layer1)
                bzero(save_layer1, sizeof(*save_layer1));
        if (save_layer2)
                bzero(save_layer2, sizeof(*save_layer2));

        zone = HAMMER_ZONE_DECODE(zone_offset);

        if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX)
                error = -1;
        if (zone >= HAMMER_MAX_ZONES)
                error = -2;
        if (error) {
                result_offset = HAMMER_OFF_BAD;
                goto done;
        }

        root_volume = get_root_volume();
        ondisk = root_volume->ondisk;
        blockmap = &ondisk->vol0_blockmap[zone];

        if (zone == HAMMER_ZONE_RAW_BUFFER_INDEX) {
                result_offset = zone_offset;
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                if (zone_offset >= blockmap->alloc_offset) {
                        error = -3;
                        result_offset = HAMMER_OFF_BAD;
                        goto done;
                }
                result_offset = hammer_xlate_to_undo(ondisk, zone_offset);
        } else {
                result_offset = hammer_xlate_to_zone2(zone_offset);
        }

        /*
         * The blockmap should match the requested zone (else the volume
         * header is mashed).
         */
        if (HAMMER_ZONE_FREEMAP_INDEX != zone &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
                error = -4;
                goto done;
        }

        /*
         * Validate that the big-block is assigned to the zone.  Also
         * assign save_layer{1,2}.
         */
        freemap = &ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        if (layer1 == NULL) {
                error = -5;
                goto done;
        }
        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                error = -6;
                goto done;
        }

        if (save_layer1)
                *save_layer1 = *layer1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2 == NULL) {
                error = -7;
                goto done;
        }
        if (layer2->zone != zone) {
                error = -8;
                goto done;
        }
        if (save_layer2)
                *save_layer2 = *layer2;

done:
        rel_buffer(buffer1);
        rel_buffer(buffer2);

        if (errorp)
                *errorp = error;

        return(result_offset);
}
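
/*
 * Added annotation, not in the original file: blockmap_lookup() reports
 * failures through *errorp using the negative codes assigned above:
 *  -1/-2  zone_offset decodes to a zone outside the valid range
 *  -3     UNDO offset lies beyond the zone's alloc_offset
 *  -4     blockmap zone in the volume header does not match the request
 *  -5/-7  the layer 1 / layer 2 buffer could not be read
 *  -6     the layer 1 entry marks the range unavailable
 *  -8     the big-block is not assigned to the requested zone
 */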