From 7e1ba5da1bae8a66b32f6edfa6d77fb4afd95833 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Sun, 1 Jun 2008 20:59:29 +0000 Subject: [PATCH] HAMMER Utilities: Performance adjustments, bug fixes. * Newfs_hammer now pre-allocates the layer1 and layer2 blockmap blocks, and pre-sizes each blockmap to 4x the initial filesystem size instead of 100x the initial filesystem size. The blockmap can be dynamically resized at any time, given a little code. In addition, there is simply no need to give it a 100x initial dynamic range. This only bloats the size of the layer-2 map unnecessarily. * Change alloc_blockmap() to use rootmap->next_offset for allocations instead of rootmap->alloc_offset and fix a bug where rootmap->phys_offset was improperly being incremented (it is a fixed field once set). The bug was in a code-path that could not be executed by current incarnations of newfs_hammer. --- sbin/hammer/hammer_util.h | 6 ++- sbin/hammer/ondisk.c | 110 ++++++++++++++++++++++++++++++++------- sbin/newfs_hammer/newfs_hammer.c | 38 ++++++++++---- 3 files changed, 123 insertions(+), 31 deletions(-) diff --git a/sbin/hammer/hammer_util.h b/sbin/hammer/hammer_util.h index 36d2745be6..cd7b2bd85e 100644 --- a/sbin/hammer/hammer_util.h +++ b/sbin/hammer/hammer_util.h @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $DragonFly: src/sbin/hammer/hammer_util.h,v 1.15 2008/05/12 21:17:16 dillon Exp $ + * $DragonFly: src/sbin/hammer/hammer_util.h,v 1.16 2008/06/01 20:59:28 dillon Exp $ */ #include @@ -123,7 +123,9 @@ void rel_buffer(struct buffer_info *buffer); hammer_off_t blockmap_lookup(hammer_off_t bmap_off, struct hammer_blockmap_layer1 *layer1, struct hammer_blockmap_layer2 *layer2); -void format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off); +void format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base); +void presize_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base, + hammer_off_t vol0_zone_limit); void format_undomap(hammer_volume_ondisk_t ondisk); void *alloc_btree_element(hammer_off_t *offp); diff --git a/sbin/hammer/ondisk.c b/sbin/hammer/ondisk.c index 5342dbbd80..57a20e6963 100644 --- a/sbin/hammer/ondisk.c +++ b/sbin/hammer/ondisk.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sbin/hammer/ondisk.c,v 1.19 2008/05/12 21:17:16 dillon Exp $ + * $DragonFly: src/sbin/hammer/ondisk.c,v 1.20 2008/06/01 20:59:28 dillon Exp $ */ #include @@ -602,15 +602,19 @@ format_undomap(hammer_volume_ondisk_t ondisk) * target bigblock). */ void -format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off) +format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base) { - blockmap->phys_offset = alloc_bigblock(NULL, zone_off); - blockmap->alloc_offset = zone_off; - blockmap->first_offset = zone_off; - blockmap->next_offset = zone_off; + blockmap->phys_offset = alloc_bigblock(NULL, zone_base); + blockmap->alloc_offset = zone_base; + blockmap->first_offset = zone_base; + blockmap->next_offset = zone_base; blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE); } +/* + * Allocate a chunk of data out of a blockmap. This is a simplified + * version which uses next_offset as a simple allocation iterator. 
+ */ static void * alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp, @@ -632,13 +636,14 @@ alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp, rootmap = &volume->ondisk->vol0_blockmap[zone]; /* - * Alignment and buffer-boundary issues + * Alignment and buffer-boundary issues. If the allocation would + * cross a buffer boundary we have to skip to the next buffer. */ bytes = (bytes + 7) & ~7; - if ((rootmap->phys_offset ^ (rootmap->phys_offset + bytes - 1)) & + if ((rootmap->next_offset ^ (rootmap->next_offset + bytes - 1)) & ~HAMMER_BUFMASK64) { volume->cache.modified = 1; - rootmap->phys_offset = (rootmap->phys_offset + bytes) & + rootmap->next_offset = (rootmap->next_offset + bytes) & ~HAMMER_BUFMASK64; } @@ -646,30 +651,38 @@ alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp, * Dive layer 1 */ layer1_offset = rootmap->phys_offset + - HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset); + HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->next_offset); layer1 = get_buffer_data(layer1_offset, &buffer1, 0); - if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) { + if ((rootmap->next_offset >= rootmap->alloc_offset && + (rootmap->next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) || + layer1->phys_offset == HAMMER_BLOCKMAP_FREE + ) { + assert(rootmap->next_offset >= rootmap->alloc_offset); buffer1->cache.modified = 1; bzero(layer1, sizeof(*layer1)); layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2; layer1->phys_offset = alloc_bigblock(NULL, - rootmap->alloc_offset); + rootmap->next_offset); } /* * Dive layer 2 */ layer2_offset = layer1->phys_offset + - HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset); + HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->next_offset); layer2 = get_buffer_data(layer2_offset, &buffer2, 0); - if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) { + if ((rootmap->next_offset & HAMMER_LARGEBLOCK_MASK64) == 0 && + (rootmap->next_offset >= rootmap->alloc_offset || + layer2->u.phys_offset == 
HAMMER_BLOCKMAP_FREE) + ) { + assert(rootmap->next_offset >= rootmap->alloc_offset); buffer2->cache.modified = 1; bzero(layer2, sizeof(*layer2)); layer2->u.phys_offset = alloc_bigblock(NULL, - rootmap->alloc_offset); + rootmap->next_offset); layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE; --layer1->blocks_free; } @@ -678,9 +691,9 @@ alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp, buffer2->cache.modified = 1; volume->cache.modified = 1; layer2->bytes_free -= bytes; - *result_offp = rootmap->alloc_offset; - rootmap->alloc_offset += bytes; - rootmap->next_offset = rootmap->alloc_offset; + *result_offp = rootmap->next_offset; + rootmap->next_offset += bytes; + rootmap->alloc_offset = rootmap->next_offset; bigblock_offset = layer2->u.phys_offset + (*result_offp & HAMMER_LARGEBLOCK_MASK); @@ -700,6 +713,67 @@ alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp, return(ptr); } +/* + * Presize a blockmap. Allocate all layer2 bigblocks required to map the + * blockmap through the specified zone limit. + * + * Note: This code is typically called later, after some data may have + * already been allocated, but can be called or re-called at any time. + * + * Note: vol0_zone_limit is not zone-encoded. 
+ */ +void +presize_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base, + hammer_off_t vol0_zone_limit) +{ + struct buffer_info *buffer1 = NULL; + struct buffer_info *buffer2 = NULL; + struct hammer_blockmap_layer1 *layer1; + struct hammer_blockmap_layer2 *layer2; + hammer_off_t zone_limit; + hammer_off_t layer1_offset; + hammer_off_t layer2_offset; + + zone_limit = zone_base + vol0_zone_limit; + + while (zone_base < zone_limit) { + layer1_offset = blockmap->phys_offset + + HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_base); + layer1 = get_buffer_data(layer1_offset, &buffer1, 0); + + if ((zone_base >= blockmap->alloc_offset && + (zone_base & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) || + layer1->phys_offset == HAMMER_BLOCKMAP_FREE + ) { + bzero(layer1, sizeof(*layer1)); + layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2; + layer1->phys_offset = alloc_bigblock(NULL, zone_base); + layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE); + buffer1->cache.modified = 1; + } + layer2_offset = layer1->phys_offset + + HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_base); + layer2 = get_buffer_data(layer2_offset, &buffer2, 0); + if (zone_base >= blockmap->alloc_offset || + layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) { + bzero(layer2, sizeof(*layer2)); + layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE; + layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE; + layer2->entry_crc = crc32(layer2, + HAMMER_LAYER2_CRCSIZE); + buffer2->cache.modified = 1; + } + zone_base += HAMMER_LARGEBLOCK_SIZE64; + } + if (blockmap->alloc_offset < zone_limit) + blockmap->alloc_offset = zone_limit; + + if (buffer1) + rel_buffer(buffer1); + if (buffer2) + rel_buffer(buffer2); +} + #if 0 /* * Reserve space from the FIFO. 
Make sure that bytes does not cross a diff --git a/sbin/newfs_hammer/newfs_hammer.c b/sbin/newfs_hammer/newfs_hammer.c index 23e1566aec..5a7f15bbdc 100644 --- a/sbin/newfs_hammer/newfs_hammer.c +++ b/sbin/newfs_hammer/newfs_hammer.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sbin/newfs_hammer/newfs_hammer.c,v 1.26 2008/05/18 01:49:44 dillon Exp $ + * $DragonFly: src/sbin/newfs_hammer/newfs_hammer.c,v 1.27 2008/06/01 20:59:29 dillon Exp $ */ #include "newfs_hammer.h" @@ -47,12 +47,13 @@ static void usage(void); int main(int ac, char **av) { - int i; - int ch; u_int32_t status; off_t total; + int ch; + int i; const char *label = NULL; struct volume_info *vol; + hammer_off_t vol0_zone_limit; /* * Sanity check basic filesystem structures. No cookies for us @@ -159,6 +160,18 @@ main(int ac, char **av) if (i != RootVolNo) format_volume(get_volume(i), NumVolumes, label, total); } + + /* + * Pre-size the blockmap layer1/layer2 infrastructure to the zone + * limit. If we do this the filesystem does not have to allocate + * new layer2 blocks which reduces the chances of the reblocker + * having to fallback to an extremely inefficient algorithm. + */ + vol = get_volume(RootVolNo); + vol0_zone_limit = vol->ondisk->vol0_zone_limit; + presize_blockmap(&vol->ondisk->vol0_blockmap[HAMMER_ZONE_BTREE_INDEX], + HAMMER_ZONE_BTREE, vol0_zone_limit); + printf("---------------------------------------------\n"); printf("%d volume%s total size %s\n", NumVolumes, (NumVolumes == 1 ? 
"" : "s"), sizetostr(total)); @@ -166,8 +179,7 @@ main(int ac, char **av) printf("memory-log-size: %s\n", sizetostr(MemAreaSize)); printf("undo-buffer-size: %s\n", sizetostr(UndoBufferSize)); - vol = get_volume(RootVolNo); - printf("zone-limit: %s\n", sizetostr(vol->ondisk->vol0_zone_limit)); + printf("zone-limit: %s\n", sizetostr(vol0_zone_limit)); printf("\n"); flush_all_volumes(); @@ -401,13 +413,17 @@ format_volume(struct volume_info *vol, int nvols, const char *label, ondisk->vol0_next_tid = createtid(); /* - * Set the default zone limit to 100x the size of the - * filesystem. We do not want to create a huge zone limit - * for tiny filesystems because the blockmap could wind up - * getting fragmented and eating a large chunk of the disk - * space. + * Set the default zone limit to 4x the size of the + * filesystem, allowing the filesystem to survive a + * worse-case of 75% fragmentation per zone. The limit + * can be expanded at any time if the filesystem is + * made larger. + * + * We do not want to create a huge zone limit for tiny + * filesystems because the blockmap could wind up getting + * fragmented and eating a large chunk of the disk space. */ - zone_limit = (hammer_off_t)total_size * 100; + zone_limit = (hammer_off_t)total_size * 4; zone_limit = (zone_limit + HAMMER_BLOCKMAP_LAYER2_MASK) & ~HAMMER_BLOCKMAP_LAYER2_MASK; if (zone_limit < (hammer_off_t)total_size || -- 2.11.4.GIT