/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.25 2008/08/21 23:28:43 thomas Exp $
 */
#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"
static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                        struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);
int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int UsingSuperClusters;
int NumVolumes;
int RootVolNo = -1;
int UseReadBehind = -4;
int UseReadAhead = 4;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}
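
/*
 * Worked example (illustrative; HAMMER_BUFSIZE and HAMMER_BUFLISTMASK come
 * from the hammer headers and are assumed here to be 16384 and 63): a
 * buffer at offset 0x50000 hashes to (0x50000 / 16384) & 63 == 20, so it
 * is chained on volume->buffer_lists[20].
 */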

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype,
                         sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid,
                                sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}
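
/*
 * Illustrative pairing (a sketch, not part of the original source): every
 * get_volume() must eventually be matched by a rel_volume() so cache.refs
 * stays balanced:
 *
 *	struct volume_info *root_vol = get_volume(RootVolNo);
 *	... use root_vol->ondisk ...
 *	rel_volume(root_vol);
 */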

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
        }
        assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        buf_offset &= ~HAMMER_BUFMASK64;

        hi = buffer_hash(buf_offset);

        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
                if (buf->buf_offset == buf_offset)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer %016llx %016llx\n",
                                (long long)orig_offset, (long long)buf_offset);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
                if (isnew < 0)
                        buf->flags |= HAMMER_BUFINFO_READAHEAD;
        } else {
                if (isnew >= 0) {
                        buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
                        hammer_cache_used(&buf->cache);
                }
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->raw_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_buffer: %s:%016llx Read failed at "
                                       "offset %016llx",
                                    volume->name,
                                    (long long)buf->buf_offset,
                                    (long long)buf->raw_offset);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}
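
/*
 * Reader's note on the isnew convention, as implied by the code above:
 *	isnew > 0  - fresh buffer; contents are zeroed rather than read
 *	isnew == 0 - normal lookup; a cache miss also triggers readahead
 *	isnew < 0  - readahead acquisition; flagged HAMMER_BUFINFO_READAHEAD
 *		     and no further readahead is chained from it
 */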

static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;
        int hi;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
                             HAMMER_ZONE_RAW_BUFFER |
                             (raw_offset - vol->ondisk->vol_buf_beg);
                hi = buffer_hash(raw_offset);
                TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
                        if (buf->raw_offset == raw_offset)
                                break;
                }
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}
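
/*
 * With the default UseReadBehind = -4 and UseReadAhead = 4, the loop above
 * visits buffers at base + i * HAMMER_BUFSIZE for i in [-4, 3]: four
 * buffers behind the base, the base itself, and three ahead, clipped to
 * the volume's buffer area.
 */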

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        struct buffer_info *buffer;

        if ((buffer = *bufferp) != NULL) {
                if (isnew > 0 ||
                    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(buffer);
                        buffer = *bufferp = NULL;
                }
        }
        if (buffer == NULL)
                buffer = *bufferp = get_buffer(buf_offset, isnew);
        return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
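
/*
 * Illustrative caller pattern (a sketch, not from the original source):
 * the caller keeps a buffer cursor across calls so repeated accesses that
 * fall within the same buffer reuse one reference; the final reference is
 * dropped with rel_buffer().
 *
 *	struct buffer_info *buffer = NULL;
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	layer2 = get_buffer_data(layer2_offset, &buffer, 0);
 *	rel_buffer(buffer);
 */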

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
        struct buffer_info *buf;

        if (*bufp)
                rel_buffer(*bufp);
        *bufp = buf = get_buffer(node_offset, 0);
        return((void *)((char *)buf->ondisk +
                        (int32_t)(node_offset & HAMMER_BUFMASK)));
}

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, &buffer);
        bzero(node, sizeof(*node));
        /* XXX buffer not released, pointer remains valid */
        return(node);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE);     /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}
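
/*
 * Zone selection above: a request of exactly HAMMER_BUFSIZE bytes goes to
 * the large-data zone, anything smaller but non-zero goes to the
 * small-data zone, and a zero-length request returns NULL without
 * allocating.  Multi-buffer allocations are not supported here.
 */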

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}
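
/*
 * At this point every layer1 entry covering the address space is marked
 * UNAVAIL; initialize_freemap() below flips entries to usable as it walks
 * the space that actually exists on each volume.
 */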

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_LARGEBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                bzero(layer2, sizeof(*layer2));
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        if (layer2->zone == 0) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}
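
/*
 * Example of the three-way classification above: for a volume whose
 * bootstrap consumed its first two bigblocks (vol_free_off sits two
 * bigblocks past the start), those two fall into the first branch and
 * remain owned by the freemap zone, everything up to vol_free_end is
 * marked free and counted, and alignment padding past vol_free_end
 * becomes UNAVAIL.
 */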

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        int didget;

        if (volume == NULL) {
                volume = get_volume(RootVolNo);
                didget = 1;
        } else {
                didget = 0;
        }
        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer);
                rel_volume(root_vol);
        }

        if (didget)
                rel_volume(volume);
        return(result_offset);
}
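
/*
 * Note: vol_free_off acts as a simple bump allocator here.  During the
 * freemap bootstrap (zone == HAMMER_ZONE_FREEMAP_INDEX) the layer1/layer2
 * entries cannot be updated yet; the vol_free_off comparison in
 * initialize_freemap() fixes them up afterwards.
 */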

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        hammer_off_t scan;
        int n;
        int limit_index;

        /*
         * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 100MB.
         */
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 100*1024*1024)
                        undo_limit = 100*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
                     ~HAMMER_LARGEBLOCK_MASK64;
        if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
                undo_limit = HAMMER_LARGEBLOCK_SIZE;
        if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                HAMMER_ZONE_UNDO_INDEX);
                scan += HAMMER_LARGEBLOCK_SIZE;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
                ++n;
        }
}
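
/*
 * Worked sizing example (illustrative; assumes HAMMER_LARGEBLOCK_SIZE is
 * the 8MB defined in the hammer headers): on a 50GB volume, 0.1% is about
 * 50MB, below the 100MB floor, so undo_limit becomes 100MB and is then
 * rounded up to the next large-block multiple: 13 blocks, roughly 104MB,
 * giving limit_index = 13.
 */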

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
        blockmap->phys_offset = 0;
        blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
                                 HAMMER_SHORT_OFF_ENCODE(-1);
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}
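
/*
 * The alloc_offset written above encodes the maximum volume number (255)
 * and a short offset of -1, i.e. the highest address in the zone, which
 * appears to mark the zone as fully pre-allocated so that real allocation
 * decisions are deferred to the freemap.
 */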

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t zone2_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = (bytes + 15) & ~15;

again:
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1.  For now we can't allocate data outside of volume 0.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new bigblock assign ownership to our
         * zone.  If the bigblock is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer2->zone = zone;
                assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset =
                        (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
                        ~HAMMER_LARGEBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off ==
               (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                             HAMMER_LARGEBLOCK_MASK;

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_ENCODE(zone, 0);

        ptr = get_buffer_data(zone2_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        if (buffer1)
                rel_buffer(buffer1);
        if (buffer2)
                rel_buffer(buffer2);

        rel_volume(volume);
        return(ptr);
}
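
/*
 * Alignment example for the "again" logic above (illustrative, assuming
 * the 16K HAMMER_BUFSIZE): a 100-byte request is first rounded up to 112
 * bytes (16-byte granularity).  If next_offset sits at 0x3fc0 within a
 * buffer, 0x3fc0 + 112 - 1 = 0x402f crosses the 16K boundary, so
 * next_offset is bumped to 0x4000 and the lookup restarts.
 */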

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
        buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}
#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}
#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}