HAMMER utilities: Misc documentation and new options.
sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.15 2008/04/27 00:43:55 dillon Exp $
 */
#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"
static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
                        hammer_off_t owner);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                        struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);
uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        TAILQ_INIT(&vol->buffer_list);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}
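/*
 * Lookup an already-configured volume by number and add a reference.
 */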
struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}
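/*
 * Release a reference obtained via get_volume().
 */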
void
rel_volume(struct volume_info *volume)
{
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}
/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        int vol_no;
        int zone;
        int n;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
        }
        assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        buf_offset &= ~HAMMER_BUFMASK64;

        TAILQ_FOREACH(buf, &volume->buffer_list, entry) {
                if (buf->buf_offset == buf_offset)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                buf->buf_offset = buf_offset;
                buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
                                       (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                TAILQ_INSERT_TAIL(&volume->buffer_list, buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew == 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->buf_disk_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_buffer: %s:%016llx Read failed at "
                                       "offset %lld",
                                    volume->name, buf->buf_offset,
                                    buf->buf_disk_offset);
                        }
                }
        }
        if (isnew) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        return(buf);
}
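/*
 * Drop a buffer reference.  On the final release a buffer marked for
 * deletion is flushed (if modified), unlinked from its volume, and freed.
 */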
void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;

        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_list, buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}
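/*
 * Return a pointer to the data at buf_offset, loading the containing
 * buffer through *bufferp so callers can cache it across calls.  The
 * usual pattern (see format_freemap() below) is:
 *
 *      struct buffer_info *buffer = NULL;
 *      ptr = get_buffer_data(offset, &buffer, 0);
 *      ...
 *      rel_buffer(buffer);
 */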
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        struct buffer_info *buffer;

        if ((buffer = *bufferp) != NULL) {
                if (isnew ||
                    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(buffer);
                        buffer = *bufferp = NULL;
                }
        }
        if (buffer == NULL)
                buffer = *bufferp = get_buffer(buf_offset, isnew);
        return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
        struct buffer_info *buf;

        if (*bufp)
                rel_buffer(*bufp);
        *bufp = buf = get_buffer(node_offset, 0);
        return((void *)((char *)buf->ondisk +
                        (int32_t)(node_offset & HAMMER_BUFMASK)));
}
/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, &buffer);
        bzero(node, sizeof(*node));
        /* XXX buffer not released, pointer remains valid */
        return(node);
}
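/*
 * Allocate a record element and, if data_len is non-zero, its data.
 * Data of less than one buffer comes from the small-data zone; exactly
 * one buffer's worth comes from the large-data zone (larger allocations
 * are not supported here).
 */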
hammer_record_ondisk_t
alloc_record_element(hammer_off_t *offp, int32_t data_len, void **datap)
{
        struct buffer_info *record_buffer = NULL;
        struct buffer_info *data_buffer = NULL;
        hammer_record_ondisk_t rec;

        rec = alloc_blockmap(HAMMER_ZONE_RECORD_INDEX, sizeof(*rec),
                             offp, &record_buffer);
        bzero(rec, sizeof(*rec));

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                *datap = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                        &rec->base.data_off, &data_buffer);
                rec->base.data_len = data_len;
                bzero(*datap, data_len);
        } else if (data_len) {
                *datap = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                        &rec->base.data_off, &data_buffer);
                rec->base.data_len = data_len;
                bzero(*datap, data_len);
        } else {
                *datap = NULL;
        }
        /* XXX buf not released, ptr remains valid */
        return(rec);
}
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will then load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, 0);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
        root_vol->cache.modified = 1;
}
/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                               & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol, 0);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_LARGEBLOCK_SIZE) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->u.owner = HAMMER_BLOCKMAP_FREE;
                        ++count;
                } else {
                        layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
                }
                layer2->entry_crc = crc32(layer2, sizeof(*layer2));
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (((phys_offset + HAMMER_LARGEBLOCK_SIZE) & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
                        layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
        struct buffer_info *buffer = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        int didget;

        if (volume == NULL) {
                volume = get_volume(RootVolNo);
                didget = 1;
        } else {
                didget = 0;
        }
        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

        /*
         * Update the freemap
         */
        if (owner) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
                buffer->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
                layer2->u.owner = owner;
                layer2->entry_crc = crc32(layer2, sizeof(*layer2));
                buffer->cache.modified = 1;

                rel_buffer(buffer);
                rel_volume(root_vol);
        }

        if (didget)
                rel_volume(volume);
        return(result_offset);
}
/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        hammer_off_t scan;
        struct hammer_blockmap_layer2 *layer2;
        int n;
        int limit_index;

        /*
         * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
         * 0.1% of the disk.
         */
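        /*
         * Worked example (assuming the usual 8MB HAMMER_LARGEBLOCK_SIZE):
         * a 50GB volume gives 50GB / 1000 ~= 51.2MB, rounded up to 56MB
         * (7 large blocks); volumes under ~8GB clamp to a single 8MB
         * large block.
         */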
        undo_limit = UndoBufferSize;
        if (undo_limit == 0)
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
        undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
                     ~HAMMER_LARGEBLOCK_MASK64;
        if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
                undo_limit = HAMMER_LARGEBLOCK_SIZE;
        if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));

        layer2 = &ondisk->vol0_undo_array[0];
        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                layer2->u.phys_offset = alloc_bigblock(NULL, scan);
                layer2->bytes_free = -1;        /* not used */
                layer2->entry_crc = crc32(layer2, sizeof(*layer2));

                scan += HAMMER_LARGEBLOCK_SIZE;
                ++layer2;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer2->bytes_free = -1;
                layer2->entry_crc = crc32(layer2, sizeof(*layer2));
                ++layer2;
                ++n;
        }
}
/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * target bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off)
{
        blockmap->phys_offset = alloc_bigblock(NULL, zone_off);
        blockmap->alloc_offset = zone_off;
        blockmap->first_offset = zone_off;
        blockmap->next_offset = zone_off;
        blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
}
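/*
 * Allocate "bytes" (rounded up to 8-byte alignment) from the given zone
 * of the root volume's blockmap, extending layer1/layer2 and backing
 * big-blocks on demand as alloc_offset crosses their boundaries.  The
 * zone-encoded offset is returned via *result_offp, a pointer into the
 * backing buffer is returned directly, and *bufferp holds a reference
 * to that buffer.
 */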
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        rootmap = &volume->ondisk->vol0_blockmap[zone];

        /*
         * Alignment and buffer-boundary issues
         */
        bytes = (bytes + 7) & ~7;
        if ((rootmap->phys_offset ^ (rootmap->phys_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                rootmap->phys_offset = (rootmap->phys_offset + bytes) &
                                       ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
                buffer1->cache.modified = 1;
                bzero(layer1, sizeof(*layer1));
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                layer1->phys_offset = alloc_bigblock(NULL,
                                                     rootmap->alloc_offset);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                buffer2->cache.modified = 1;
                bzero(layer2, sizeof(*layer2));
                layer2->u.phys_offset = alloc_bigblock(NULL,
                                                       rootmap->alloc_offset);
                layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                --layer1->blocks_free;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        layer2->bytes_free -= bytes;
        *result_offp = rootmap->alloc_offset;
        rootmap->alloc_offset += bytes;
        rootmap->next_offset = rootmap->alloc_offset;

        bigblock_offset = layer2->u.phys_offset +
                          (*result_offp & HAMMER_LARGEBLOCK_MASK);
        ptr = get_buffer_data(bigblock_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        if (buffer1)
                rel_buffer(buffer1);
        if (buffer2)
                rel_buffer(buffer2);

        rel_volume(volume);
        return(ptr);
}
#if 0
/*
 * Reserve space from the FIFO.  Make sure that bytes does not cross a
 * record boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static
hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                  struct buffer_info **bufp, u_int16_t hdr_type)
{
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_fifo_head_t head;
        hammer_fifo_tail_t tail;
        hammer_off_t off;
        int32_t aligned_bytes;

        aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
                         HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

        volume = get_volume(RootVolNo);
        off = volume->ondisk->vol0_fifo_end;

        /*
         * For now don't deal with transitions across buffer boundaries,
         * only newfs_hammer uses this function.
         */
        assert((off & ~HAMMER_BUFMASK64) ==
               ((off + aligned_bytes) & ~HAMMER_BUFMASK64));

        *bufp = buf = get_buffer(off, 0);

        buf->cache.modified = 1;
        volume->cache.modified = 1;

        head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
        bzero(head, base_bytes);

        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = aligned_bytes;
        head->hdr_seq = volume->ondisk->vol0_next_seq++;

        tail = (void *)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
        tail->tail_type = hdr_type;
        tail->tail_size = aligned_bytes;

        volume->ondisk->vol0_fifo_end += aligned_bytes;
        volume->cache.modified = 1;

        rel_volume(volume);

        return(off);
}
#endif
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;

        TAILQ_FOREACH(buffer, &volume->buffer_list, entry)
                flush_buffer(buffer);
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}
void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
        buffer->cache.modified = 0;
}
#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}
#endif
#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}
#endif
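/*
 * Write one HAMMER_BUFSIZE buffer to the volume at the given raw offset.
 */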
static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}
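/*
 * Print a formatted fatal error message and exit.
 */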
void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}