/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"

static void check_volume(struct volume_info *volume);
static void get_buffer_readahead(struct buffer_info *base);
static __inline int readhammervol(struct volume_info *volume);
static __inline int readhammerbuf(struct buffer_info *buffer);
static __inline int writehammervol(struct volume_info *volume);
static __inline int writehammerbuf(struct buffer_info *buffer);

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int UseReadBehind = -4;
int UseReadAhead = 4;
int DebugOpt;
uint32_t HammerVersion = -1;

TAILQ_HEAD(volume_list, volume_info);
static struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static int valid_hammer_volumes;

static __inline
int
buffer_hash(hammer_off_t zone2_offset)
{
        int hi;

        hi = (int)(zone2_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}
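
/*
 * Illustrative note (not part of the original source): with the 16384
 * byte HAMMER_BUFSIZE, the hash is simply the 16KB buffer index masked
 * into the buffer-list table, e.g.
 *
 *      buffer_hash(0x0000) == 0
 *      buffer_hash(0x4000) == 1        (the next 16KB buffer)
 *
 * assuming HAMMER_BUFLISTMASK == HAMMER_BUFLISTS - 1, so offsets that
 * differ by a multiple of HAMMER_BUFLISTS buffers land in the same bucket.
 */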

static struct buffer_info*
find_buffer(hammer_off_t zone2_offset)
{
        struct volume_info *volume;
        struct buffer_info *buffer;
        int hi;

        volume = get_volume(HAMMER_VOL_DECODE(zone2_offset));
        assert(volume);

        hi = buffer_hash(zone2_offset);
        TAILQ_FOREACH(buffer, &volume->buffer_lists[hi], entry)
                if (buffer->zone2_offset == zone2_offset)
                        return(buffer);
        return(NULL);
}

static
struct volume_info *
__alloc_volume(const char *volname, int oflags)
{
        struct volume_info *volume;
        int i;

        volume = calloc(1, sizeof(*volume));
        volume->vol_no = -1;
        volume->rdonly = (oflags == O_RDONLY);
        volume->name = strdup(volname);
        volume->fd = open(volume->name, oflags);
        if (volume->fd < 0)
                err(1, "alloc_volume: Failed to open %s", volume->name);
        check_volume(volume);

        volume->ondisk = calloc(1, HAMMER_BUFSIZE);

        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&volume->buffer_lists[i]);

        return(volume);
}

static void
__add_volume(struct volume_info *volume)
{
        struct volume_info *scan;
        struct stat st1, st2;

        if (fstat(volume->fd, &st1) != 0)
                errx(1, "add_volume: %s: Failed to stat", volume->name);

        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == volume->vol_no)
                        errx(1, "add_volume: %s: Duplicate volume number %d "
                                "against %s",
                                volume->name, volume->vol_no, scan->name);
                if (fstat(scan->fd, &st2) != 0)
                        errx(1, "add_volume: %s: Failed to stat %s",
                                volume->name, scan->name);
                if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev))
                        errx(1, "add_volume: %s: Specified more than once",
                                volume->name);
        }

        TAILQ_INSERT_TAIL(&VolList, volume, entry);
}

static void
__verify_volume(struct volume_info *volume)
{
        hammer_volume_ondisk_t ondisk = volume->ondisk;

        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME)
                errx(1, "verify_volume: Invalid volume signature %016jx",
                        ondisk->vol_signature);
        if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO)
                errx(1, "verify_volume: Invalid root volume# %d",
                        ondisk->vol_rootvol);
        if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)))
                errx(1, "verify_volume: %s: Header does not indicate "
                        "that this is a HAMMER volume", volume->name);
        if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)))
                errx(1, "verify_volume: %s: FSId does not match other volumes!",
                        volume->name);
}

/*
 * Initialize a volume structure and ondisk vol_no field.
 */
struct volume_info *
init_volume(const char *filename, int oflags, int32_t vol_no)
{
        struct volume_info *volume;

        volume = __alloc_volume(filename, oflags);
        volume->vol_no = volume->ondisk->vol_no = vol_no;

        __add_volume(volume);

        return(volume);
}

/*
 * Initialize a volume structure and read ondisk volume header.
 */
struct volume_info*
load_volume(const char *filename, int oflags, int verify)
{
        struct volume_info *volume;
        int n;

        volume = __alloc_volume(filename, oflags);

        n = readhammervol(volume);
        if (n == -1)
                err(1, "load_volume: %s: Read failed at offset 0",
                    volume->name);
        volume->vol_no = volume->ondisk->vol_no;
        HammerVersion = volume->ondisk->vol_version;

        if (valid_hammer_volumes++ == 0)
                Hammer_FSId = volume->ondisk->vol_fsid;
        if (verify)
                __verify_volume(volume);

        __add_volume(volume);

        return(volume);
}
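
/*
 * Example usage (illustrative sketch, not part of the original source;
 * the device path is hypothetical):
 *
 *      struct volume_info *volume;
 *
 *      volume = load_volume("/dev/da0s1a", O_RDONLY, 1);
 *      print_blockmap(volume);
 */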

/*
 * Check basic volume characteristics.
 */
static void
check_volume(struct volume_info *volume)
{
        struct partinfo pinfo;
        struct stat st;

        /*
         * Get basic information about the volume
         */
        if (ioctl(volume->fd, DIOCGPART, &pinfo) < 0) {
                /*
                 * Allow the formatting of regular files as HAMMER volumes
                 */
                if (fstat(volume->fd, &st) < 0)
                        err(1, "Unable to stat %s", volume->name);
                volume->size = st.st_size;
                volume->type = "REGFILE";
        } else {
                /*
                 * When formatting a block device as a HAMMER volume the
                 * sector size must be compatible.  HAMMER uses 16384 byte
                 * filesystem buffers.
                 */
                if (pinfo.reserved_blocks)
                        errx(1, "HAMMER cannot be placed in a partition "
                                "which overlaps the disklabel or MBR");
                if (pinfo.media_blksize > HAMMER_BUFSIZE ||
                    HAMMER_BUFSIZE % pinfo.media_blksize)
                        errx(1, "A media sector size of %d is not supported",
                                pinfo.media_blksize);

                volume->size = pinfo.media_size;
                volume->device_offset = pinfo.media_offset;
                volume->type = "DEVICE";
        }
}
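
/*
 * Illustrative note (not part of the original source): the block size
 * check above accepts the common 512, 4096 and 16384 byte sectors,
 * since each divides the 16384 byte HAMMER_BUFSIZE evenly, and rejects
 * e.g. 32768 byte sectors (larger than a filesystem buffer) or odd
 * sizes such as 6144 that leave a remainder.
 */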

void
assert_volume_offset(struct volume_info *volume)
{
        assert(hammer_is_zone_raw_buffer(volume->vol_free_off));
        assert(hammer_is_zone_raw_buffer(volume->vol_free_end));
        if (volume->vol_free_off >= volume->vol_free_end)
                errx(1, "Ran out of room, filesystem too small");
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *volume;

        TAILQ_FOREACH(volume, &VolList, entry)
                if (volume->vol_no == vol_no)
                        break;

        return(volume);
}

struct volume_info *
get_root_volume(void)
{
        return(get_volume(HAMMER_ROOT_VOLNO));
}

static hammer_off_t
__blockmap_xlate_to_zone2(hammer_off_t buf_offset)
{
        hammer_off_t zone2_offset;
        int error = 0;

        if (hammer_is_zone_raw_buffer(buf_offset))
                zone2_offset = buf_offset;
        else
                zone2_offset = blockmap_lookup(buf_offset, &error);

        if (error)
                return(HAMMER_OFF_BAD);
        assert(hammer_is_zone_raw_buffer(zone2_offset));

        return(zone2_offset);
}

static struct buffer_info *
__alloc_buffer(hammer_off_t zone2_offset, int isnew)
{
        struct volume_info *volume;
        struct buffer_info *buffer;
        int hi;

        volume = get_volume(HAMMER_VOL_DECODE(zone2_offset));
        assert(volume != NULL);

        buffer = calloc(1, sizeof(*buffer));
        buffer->zone2_offset = zone2_offset;
        buffer->raw_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
        buffer->volume = volume;
        buffer->ondisk = calloc(1, HAMMER_BUFSIZE);

        if (isnew <= 0)
                if (readhammerbuf(buffer) == -1)
                        err(1, "Failed to read %s:%016jx at %016jx",
                            volume->name,
                            (intmax_t)buffer->zone2_offset,
                            (intmax_t)buffer->raw_offset);

        hi = buffer_hash(zone2_offset);
        TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buffer, entry);
        hammer_cache_add(&buffer->cache);

        return(buffer);
}

/*
 * Acquire the 16KB buffer for the specified zone offset.
 */
static struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        struct buffer_info *buffer;
        hammer_off_t zone2_offset;
        int dora = 0;

        zone2_offset = __blockmap_xlate_to_zone2(buf_offset);
        if (zone2_offset == HAMMER_OFF_BAD)
                return(NULL);

        zone2_offset &= ~HAMMER_BUFMASK64;
        buffer = find_buffer(zone2_offset);

        if (buffer == NULL) {
                buffer = __alloc_buffer(zone2_offset, isnew);
                dora = (isnew == 0);
        } else {
                assert(isnew != -1);
                hammer_cache_used(&buffer->cache);
        }

        assert(buffer->ondisk != NULL);

        ++buffer->cache.refs;
        hammer_cache_flush();

        if (isnew > 0) {
                assert(buffer->cache.modified == 0);
                bzero(buffer->ondisk, HAMMER_BUFSIZE);
                buffer->cache.modified = 1;
        }

        if (dora)
                get_buffer_readahead(buffer);
        return(buffer);
}

static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buffer;
        struct volume_info *volume;
        hammer_off_t zone2_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        volume = base->volume;

        while (ri < re) {
                if (raw_offset >= volume->ondisk->vol_buf_end)
                        break;
                if (raw_offset < volume->ondisk->vol_buf_beg || ri == 0) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                zone2_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                        raw_offset - volume->ondisk->vol_buf_beg);
                buffer = find_buffer(zone2_offset);
                if (buffer == NULL) {
                        /* call with -1 to prevent another readahead */
                        buffer = get_buffer(zone2_offset, -1);
                        rel_buffer(buffer);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}
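
/*
 * Illustrative note (not part of the original source): with the default
 * UseReadBehind of -4 and UseReadAhead of 4, a cache miss on buffer N
 * walks the raw buffers N-4 through N+3, skipping N itself (ri == 0)
 * and anything outside [vol_buf_beg, vol_buf_end), and pulls each
 * missing one in with get_buffer(..., -1) so the readahead cannot
 * recurse.
 */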

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0)
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->zone2_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                }
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        hammer_off_t xor;

        if (*bufferp != NULL) {
                /* XXX xor is always non zero for indirect zones */
                xor = HAMMER_OFF_LONG_ENCODE(buf_offset) ^
                      HAMMER_OFF_LONG_ENCODE((*bufferp)->zone2_offset);
                if (isnew > 0 || (xor & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }

        if (*bufferp == NULL) {
                *bufferp = get_buffer(buf_offset, isnew);
                if (*bufferp == NULL)
                        return(NULL);
        }

        return(((char *)(*bufferp)->ondisk) +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}
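
/*
 * Worked example (illustrative, assuming the 16KB HAMMER_BUFMASK64 of
 * 0x3fff): offsets 0x3f00 and 0x3ff0 xor to 0x00f0, which masks to
 * zero, so the cached buffer is reused; 0x3ff0 and 0x4010 xor to
 * 0x7fe0, which survives ~HAMMER_BUFMASK64, so the old buffer is
 * released and the correct one loaded in its place.
 */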

/*
 * Allocate HAMMER elements - B-Tree nodes
 */
hammer_node_ondisk_t
alloc_btree_node(hammer_off_t *offp, struct buffer_info **data_bufferp)
{
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, data_bufferp);
        bzero(node, sizeof(*node));
        return(node);
}

/*
 * Allocate HAMMER elements - meta data (inode, direntry, PFS, etc)
 */
void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return(data);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
{
        hammer_blockmap_t blockmap;
        hammer_off_t zone_base;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

        assert(hammer_is_index_record(zone));

        blockmap = &root_vol->ondisk->vol0_blockmap[zone];
        zone_base = HAMMER_ZONE_ENCODE(zone, offset);

        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = 0;
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
        hammer_crc_set_blockmap(HammerVersion, blockmap);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_blockmap_t blockmap;
        hammer_blockmap_layer1_t layer1;
        int i, isnew;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

        layer1_offset = bootstrap_bigblock(root_vol);
        for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
                isnew = ((i % HAMMER_BUFSIZE) == 0);
                layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                hammer_crc_set_layer1(HammerVersion, layer1);
        }
        assert(i == HAMMER_BIGBLOCK_SIZE);
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = layer1_offset;
        blockmap->first_offset = 0;
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        hammer_crc_set_blockmap(HammerVersion, blockmap);
}
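
/*
 * Illustrative note (not from the original source): the freemap is a
 * two-layer structure.  Each layer1 entry initialized above will, once
 * initialize_freemap() runs, point at one big-block worth of layer2
 * entries, and each layer2 entry tracks exactly one HAMMER_BIGBLOCK_SIZE
 * big-block, which is why the loops below step by HAMMER_BLOCKMAP_LAYER2
 * and HAMMER_BIGBLOCK_SIZE respectively.
 */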

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *volume)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t aligned_vol_free_end;
        hammer_blockmap_t freemap;
        int64_t count = 0;
        int64_t layer1_count = 0;

        root_vol = get_root_volume();

        assert_volume_offset(volume);
        aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(volume->vol_free_end);

        printf("initialize freemap volume %d\n", volume->vol_no);

        /*
         * Initialize the freemap.  First preallocate the big-blocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = bootstrap_bigblock(volume);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        hammer_crc_set_layer1(HammerVersion, layer1);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                        bzero(layer2, sizeof(*layer2));

                        if (phys_offset + block_offset < volume->vol_free_off) {
                                /*
                                 * Big-blocks already allocated as part
                                 * of the freemap bootstrap.
                                 */
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        } else if (phys_offset + block_offset < volume->vol_free_end) {
                                layer2->zone = 0;
                                layer2->append_off = 0;
                                layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                                ++count;
                                ++layer1_count;
                        } else {
                                layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                        hammer_crc_set_layer2(HammerVersion, layer2);
                        buffer2->cache.modified = 1;
                }

                layer1->blocks_free += layer1_count;
                hammer_crc_set_layer1(HammerVersion, layer1);
                buffer1->cache.modified = 1;
        }

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        return(count);
}

/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
int64_t
count_freemap(struct volume_info *volume)
{
        hammer_off_t phys_offset;
        hammer_off_t vol_free_off;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;

        vol_free_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);

        assert_volume_offset(volume);
        aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(volume->vol_free_end);

        if (volume->vol_no == HAMMER_ROOT_VOLNO)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                if (phys_offset < vol_free_off)
                        ;
                else if (phys_offset < volume->vol_free_end)
                        ++count;
        }

        return(count);
}

/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
{
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        hammer_volume_ondisk_t ondisk;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        uint32_t seqno;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
        ondisk = root_vol->ondisk;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_MAX_UNDO_BIGBLOCKS big-blocks.
         * Size to approximately 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 512MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
        undo_limit = *undo_buffer_size;
        if (undo_limit == 0) {
                undo_limit = HAMMER_VOL_BUF_SIZE(ondisk) / 1000;
                if (undo_limit < HAMMER_BIGBLOCK_SIZE * HAMMER_MIN_UNDO_BIGBLOCKS)
                        undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_MIN_UNDO_BIGBLOCKS;
        }
        undo_limit = HAMMER_BIGBLOCK_DOALIGN(undo_limit);
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_MAX_UNDO_BIGBLOCKS)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_MAX_UNDO_BIGBLOCKS;
        *undo_buffer_size = undo_limit;
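
        /*
         * Worked example (illustrative numbers, not from the original
         * source): for a 50G volume the default comes out to roughly
         * 50G / 1000 ~= 50MB, well below the 512MB minimum, so it is
         * raised to HAMMER_MIN_UNDO_BIGBLOCKS big-blocks before the
         * final alignment and clamping above.
         */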

        blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ENCODE_UNDO(0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_UNDO(undo_limit);
        hammer_crc_set_blockmap(HammerVersion, blockmap);

        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
        assert(limit_index <= HAMMER_MAX_UNDO_BIGBLOCKS);

        for (n = 0; n < limit_index; ++n)
                ondisk->vol0_undo_array[n] = alloc_undo_bigblock(root_vol);
        while (n < HAMMER_MAX_UNDO_BIGBLOCKS)
                ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)HAMMER_OFF_LONG_ENCODE(blockmap->alloc_offset) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                hammer_crc_set_fifo_head(HammerVersion, head, bytes);

                scan += bytes;
        }
        rel_buffer(buffer);
}

const char *zone_labels[] = {
        "",             /* 0 */
        "raw_volume",   /* 1 */
        "raw_buffer",   /* 2 */
        "undo",         /* 3 */
        "freemap",      /* 4 */
        "",             /* 5 */
        "",             /* 6 */
        "",             /* 7 */
        "btree",        /* 8 */
        "meta",         /* 9 */
        "large_data",   /* 10 */
        "small_data",   /* 11 */
        "",             /* 12 */
        "",             /* 13 */
        "",             /* 14 */
        "unavail",      /* 15 */
};

void
print_blockmap(const struct volume_info *volume)
{
        hammer_blockmap_t blockmap;
        hammer_volume_ondisk_t ondisk;
        int64_t size, used;
        int i;
#define INDENT ""

        ondisk = volume->ondisk;
        printf(INDENT"vol_label\t%s\n", ondisk->vol_label);
        printf(INDENT"vol_count\t%d\n", ondisk->vol_count);
        printf(INDENT"vol_bot_beg\t%s\n", sizetostr(ondisk->vol_bot_beg));
        printf(INDENT"vol_mem_beg\t%s\n", sizetostr(ondisk->vol_mem_beg));
        printf(INDENT"vol_buf_beg\t%s\n", sizetostr(ondisk->vol_buf_beg));
        printf(INDENT"vol_buf_end\t%s\n", sizetostr(ondisk->vol_buf_end));
        printf(INDENT"vol0_next_tid\t%016jx\n",
               (uintmax_t)ondisk->vol0_next_tid);

        blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        size = HAMMER_OFF_LONG_ENCODE(blockmap->alloc_offset);
        if (blockmap->first_offset <= blockmap->next_offset)
                used = blockmap->next_offset - blockmap->first_offset;
        else
                used = blockmap->alloc_offset - blockmap->first_offset +
                        HAMMER_OFF_LONG_ENCODE(blockmap->next_offset);
        printf(INDENT"undo_size\t%s\n", sizetostr(size));
        printf(INDENT"undo_used\t%s\n", sizetostr(used));

        printf(INDENT"zone #             "
               "phys             first            next             alloc\n");
        for (i = 0; i < HAMMER_MAX_ZONES; i++) {
                blockmap = &ondisk->vol0_blockmap[i];
                printf(INDENT"zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
                        i, zone_labels[i],
                        (uintmax_t)blockmap->phys_offset,
                        (uintmax_t)blockmap->first_offset,
                        (uintmax_t)blockmap->next_offset,
                        (uintmax_t)blockmap->alloc_offset);
        }
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *volume;

        TAILQ_FOREACH(volume, &VolList, entry)
                flush_volume(volume);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        if (writehammervol(volume) == -1)
                err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
}

void
flush_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;

        volume = buffer->volume;
        if (writehammerbuf(buffer) == -1)
                err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
        buffer->cache.modified = 0;
}

/*
 * Core I/O operations
 */
static int
__read(struct volume_info *volume, void *data, int64_t offset, int size)
{
        ssize_t n;

        n = pread(volume->fd, data, size, offset);
        if (n != size)
                return(-1);
        return(0);
}

static __inline int
readhammervol(struct volume_info *volume)
{
        return(__read(volume, volume->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
readhammerbuf(struct buffer_info *buffer)
{
        return(__read(buffer->volume, buffer->ondisk, buffer->raw_offset,
                HAMMER_BUFSIZE));
}

static int
__write(struct volume_info *volume, const void *data, int64_t offset, int size)
{
        ssize_t n;

        if (volume->rdonly)
                return(0);

        n = pwrite(volume->fd, data, size, offset);
        if (n != size)
                return(-1);
        return(0);
}

static __inline int
writehammervol(struct volume_info *volume)
{
        return(__write(volume, volume->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
writehammerbuf(struct buffer_info *buffer)
{
        return(__write(buffer->volume, buffer->ondisk, buffer->raw_offset,
                HAMMER_BUFSIZE));
}

int64_t init_boot_area_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_BOOT_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
        }

        if (value < HAMMER_BOOT_MINBYTES)
                value = HAMMER_BOOT_MINBYTES;
        else if (value > HAMMER_BOOT_MAXBYTES)
                value = HAMMER_BOOT_MAXBYTES;

        return(value);
}
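
/*
 * Illustrative note (not from the original source): for very small
 * volumes the nominal boot area is halved (value >>= 1) until it no
 * longer exceeds the per-volume share avg_vol_size / HAMMER_MAX_VOLUMES,
 * then clamped into [HAMMER_BOOT_MINBYTES, HAMMER_BOOT_MAXBYTES].
 * init_memory_log_size() below applies the same policy with the
 * HAMMER_MEM_* constants.
 */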

int64_t init_memory_log_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_MEM_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
        }

        if (value < HAMMER_MEM_MINBYTES)
                value = HAMMER_MEM_MINBYTES;
        else if (value > HAMMER_MEM_MAXBYTES)
                value = HAMMER_MEM_MAXBYTES;

        return(value);
}