/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.25 2008/08/21 23:28:43 thomas Exp $
 */
#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"
static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);
int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int UsingSuperClusters;
int NumVolumes;
int RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}
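
/*
 * Illustrative arithmetic, not part of the original file (assumes the
 * usual HAMMER_BUFSIZE of 16384 and a 64-bucket buffer_lists[] table,
 * i.e. HAMMER_BUFLISTMASK == 63): buffers 16K apart hash to adjacent
 * buckets, while buffers 1MB apart (64 * 16K) collide:
 *
 *	buffer_hash(0x0000)   == 0
 *	buffer_hash(0x4000)   == 1
 *	buffer_hash(0x100000) == 0	(1MB / 16K = 64, and 64 & 63 = 0)
 */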
/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}
void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
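
/*
 * Illustrative usage sketch (hypothetical caller, not compiled here):
 * get_volume() and rel_volume() bracket access to a volume's in-memory
 * structure, exactly as alloc_bigblock() and initialize_freemap() do
 * below.
 *
 *	struct volume_info *root_vol;
 *
 *	root_vol = get_volume(RootVolNo);	... takes a ref
 *	... use root_vol->ondisk ...
 *	rel_volume(root_vol);			... drops the ref
 */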
/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer %016llx %016llx\n",
				orig_offset, buf_offset);
		}
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
					(buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew == 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				       "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
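
/*
 * Illustrative usage sketch (hypothetical, not compiled here): callers
 * treat the buffer_info pointer as a cursor which get_buffer_data()
 * retargets whenever the requested offset falls in a different
 * underlying buffer, releasing it once at the end -- the same pattern
 * initialize_freemap() uses with buffer1/buffer2 below:
 *
 *	struct buffer_info *buffer = NULL;
 *	struct hammer_blockmap_layer1 *layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	... read or modify *layer1, set buffer->cache.modified ...
 *	rel_buffer(buffer);
 */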
/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
			(int32_t)(node_offset & HAMMER_BUFMASK)));
}
/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE);	/* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}
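
/*
 * Illustrative usage sketch (hypothetical caller and payload, not part
 * of this file): the caller receives a zeroed region, its zone-encoded
 * offset via *offp, and a referenced buffer via *data_bufferp which it
 * should eventually release:
 *
 *	struct buffer_info *data_buffer = NULL;
 *	hammer_off_t data_off;
 *	void *data;
 *
 *	data = alloc_data_element(&data_off, len, &data_buffer);
 *	bcopy(payload, data, len);
 *	rel_buffer(data_buffer);
 */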
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		layer1->blocks_free = 0;
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}
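
/*
 * Note on the radix arithmetic (illustrative, assuming the usual HAMMER
 * constants: 8MB big-blocks, 32-byte layer1 entries, 16-byte layer2
 * entries): one big-block holds HAMMER_BLOCKMAP_RADIX1 = 8MB/32 =
 * 262144 layer1 entries, each pointing at a layer2 big-block of
 * 8MB/16 = 524288 entries.  A single layer2 big-block therefore maps
 * 524288 * 8MB = 4TB of storage, and the full two-level freemap
 * addresses 262144 * 4TB = 1 exabyte.
 */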
/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	int n;
	int limit_index;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 100MB.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 100*1024*1024)
			undo_limit = 100*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}
}
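
/*
 * Worked sizing example (illustrative, assuming 8MB big-blocks): on a
 * 50GB volume the 0.1% heuristic yields roughly 50MB, below the 100MB
 * floor, so undo_limit becomes 100MB = 104857600 bytes.  Rounding up
 * to a big-block multiple gives 13 * 8388608 = 109051904 bytes (104MB),
 * so limit_index == 13: the first 13 entries of vol0_undo_array[]
 * receive real big-blocks and the rest are marked
 * HAMMER_BLOCKMAP_UNAVAIL.
 */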
/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;

again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset = (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
					~HAMMER_LARGEBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			     HAMMER_LARGEBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}
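
/*
 * Note on the boundary test above (illustrative, assuming 16K buffers,
 * i.e. HAMMER_BUFMASK64 == 0x3fff): XORing next_offset with the last
 * byte of the proposed allocation exposes any differing bits above the
 * buffer mask, which indicates a buffer crossing.  For example, with
 * the low bits of next_offset at 0x3ff0 and bytes == 32:
 *
 *	0x3ff0 ^ (0x3ff0 + 31) = 0x3ff0 ^ 0x400f = 0x7fff
 *	0x7fff & ~0x3fff       = 0x4000 != 0	... crosses
 *
 * so next_offset is bumped to (0x3ff0 + 32) & ~0x3fff = 0x4000, the
 * start of the next buffer.
 */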
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}
void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}
void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}
#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif
#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif
static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}
void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}