/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.14 2008/03/18 05:21:53 dillon Exp $
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "hammer_util.h"
static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
			hammer_off_t owner);
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);
uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int	UsingSuperClusters;
int	RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	TAILQ_INIT(&vol->buffer_list);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0)
		err(1, "setup_volume: %s: Open failed", filename);

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype,
			 sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid,
				sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
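
/*
 * Illustrative usage (hypothetical sketch, not part of the original file):
 * a front-end such as newfs_hammer brings each volume online with
 * setup_volume() before touching any buffers.  av[], nvols, and i below
 * stand in for parsed command-line arguments.
 */
#if 0
	for (i = 0; i < nvols; ++i)
		setup_volume((int32_t)i, av[i], 1 /* isnew */, O_RDWR);
#endif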
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}
void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	void *ondisk;
	int vol_no;
	int zone;
	int n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	TAILQ_FOREACH(buf, &volume->buffer_list, entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
				       (buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_list, buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew == 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				       "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_list, buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
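
/*
 * Illustrative usage (hypothetical sketch, not part of the original file):
 * the **bufferp idiom above lets a scan walk sequential offsets while only
 * re-reading when the offset crosses into a different HAMMER_BUFSIZE
 * buffer.  base and nbytes below are hypothetical; start with a NULL
 * buffer_info pointer and release it once at the end.
 */
#if 0
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_off;
	void *ptr;

	for (scan_off = base; scan_off < base + nbytes; scan_off += 64) {
		ptr = get_buffer_data(scan_off, &buffer, 0);
		/* ... examine 64 bytes at ptr ... */
	}
	if (buffer)
		rel_buffer(buffer);
#endif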
/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
			(int32_t)(node_offset & HAMMER_BUFMASK)));
}
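
/*
 * Illustrative usage (hypothetical sketch, not part of the original file):
 * descending one level of the B-Tree.  Because get_node() releases the
 * previous buffer in *bufp and loads a referenced one, the returned node
 * stays valid until the next call.  The element/field names are assumptions
 * following the hammer_disk.h B-Tree definitions.
 */
#if 0
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	int i = 0;	/* element index, hypothetical */

	node = get_node(node_offset, &buffer);
	node = get_node(node->elms[i].internal.subtree_offset, &buffer);
	rel_buffer(buffer);
#endif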
/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}
hammer_record_ondisk_t
alloc_record_element(hammer_off_t *offp, int32_t data_len, void **datap)
{
	struct buffer_info *record_buffer = NULL;
	struct buffer_info *data_buffer = NULL;
	hammer_record_ondisk_t rec;

	rec = alloc_blockmap(HAMMER_ZONE_RECORD_INDEX, sizeof(*rec),
			     offp, &record_buffer);
	bzero(rec, sizeof(*rec));

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		*datap = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else if (data_len) {
		*datap = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else {
		*datap = NULL;
	}
	/* XXX buffers not released, pointers remain valid */
	return(rec);
}
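
/*
 * Illustrative usage (hypothetical sketch, not part of the original file):
 * allocating a record with a small inline payload.  alloc_record_element()
 * already fills in rec->base.data_off and data_len and zeroes the data
 * area, so the caller only copies its payload in.  payload and payload_len
 * are hypothetical.
 */
#if 0
	hammer_record_ondisk_t rec;
	hammer_off_t rec_off;
	void *data;

	rec = alloc_record_element(&rec_off, payload_len, &data);
	if (payload_len)
		bcopy(payload, data, payload_len);
#endif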
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i;
	int isnew;

	layer1_offset = alloc_bigblock(root_vol, 0);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
	root_vol->cache.modified = 1;
}
/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol, 0);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->u.owner = HAMMER_BLOCKMAP_FREE;
			++count;
		} else {
			layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
		}
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (((phys_offset + HAMMER_LARGEBLOCK_SIZE) & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
			layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
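
/*
 * Worked example (clarifying note, not in the original source): locating
 * the freemap entry for a raw buffer offset is a two-step radix lookup,
 * exactly as coded above:
 *
 *	layer1_offset = layer1_base +
 *			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
 *	layer2_offset = layer1->phys_offset +
 *			HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);
 *
 * The high bits of the offset select a layer1 entry, which points at a
 * bigblock holding layer2 entries; the next bits select the layer2 entry
 * describing one HAMMER_LARGEBLOCK_SIZE bigblock.
 */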
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 */
static hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}

	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap if owner != 0.
	 */
	if (owner) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
		layer2->u.owner = owner;
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		buffer->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	const hammer_off_t undo_limit = HAMMER_LARGEBLOCK_SIZE; /* XXX */
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	struct hammer_blockmap_layer2 *layer2;
	int n;
	int limit_index;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));

	layer2 = &ondisk->vol0_undo_array[0];

	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index < HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		layer2->u.phys_offset = alloc_bigblock(NULL, scan);
		layer2->bytes_free = -1;	/* not used */
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));

		scan += HAMMER_LARGEBLOCK_SIZE;
		++layer2;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer2->bytes_free = -1;
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		++layer2;
		++n;
	}
}
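
/*
 * Worked example (clarifying note, not in the original source): with
 * undo_limit = HAMMER_LARGEBLOCK_SIZE, limit_index computes to 1, so the
 * for loop above allocates exactly one undo bigblock and the trailing
 * while loop marks the rest of vol0_undo_array[] UNAVAIL.
 */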
/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * the target bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off)
{
	blockmap->phys_offset = alloc_bigblock(NULL, zone_off);
	blockmap->alloc_offset = zone_off;
	blockmap->first_offset = zone_off;
	blockmap->next_offset = zone_off;
	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
}
static void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t rootmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t bigblock_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	rootmap = &volume->ondisk->vol0_blockmap[zone];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 7) & ~7;
	if ((rootmap->alloc_offset ^ (rootmap->alloc_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		rootmap->alloc_offset = (rootmap->alloc_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1
	 */
	layer1_offset = rootmap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
		buffer1->cache.modified = 1;
		bzero(layer1, sizeof(*layer1));
		layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
		layer1->phys_offset = alloc_bigblock(NULL,
						     rootmap->alloc_offset);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
		buffer2->cache.modified = 1;
		bzero(layer2, sizeof(*layer2));
		layer2->u.phys_offset = alloc_bigblock(NULL,
						       rootmap->alloc_offset);
		layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
		--layer1->blocks_free;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	layer2->bytes_free -= bytes;
	*result_offp = rootmap->alloc_offset;
	rootmap->alloc_offset += bytes;
	rootmap->next_offset = rootmap->alloc_offset;

	bigblock_offset = layer2->u.phys_offset +
			  (*result_offp & HAMMER_LARGEBLOCK_MASK);
	ptr = get_buffer_data(bigblock_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(volume);
	return(ptr);
}
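
/*
 * Worked example (clarifying note, not in the original source): the
 * "(bytes + 7) & ~7" round-up aligns requests to 8 bytes, e.g. 13 -> 16.
 * The XOR test above then flags a buffer crossing: if any bit above the
 * buffer mask differs between the first byte (alloc_offset) and the last
 * byte (alloc_offset + bytes - 1), the two bytes land in different
 * HAMMER_BUFSIZE buffers and the cursor is bumped to the next boundary.
 */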
/*
 * Reserve space from the FIFO.  Make sure that bytes does not cross a
 * buffer boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
		  struct buffer_info **bufp, u_int16_t hdr_type)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t off;
	int32_t aligned_bytes;

	aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
			 HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

	volume = get_volume(RootVolNo);
	off = volume->ondisk->vol0_fifo_end;

	/*
	 * For now don't deal with transitions across buffer boundaries,
	 * only newfs_hammer uses this function.
	 */
	assert((off & ~HAMMER_BUFMASK64) ==
	       ((off + aligned_bytes) & ~HAMMER_BUFMASK64));

	*bufp = buf = get_buffer(off, 0);

	buf->cache.modified = 1;
	volume->cache.modified = 1;

	head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
	bzero(head, base_bytes);

	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = aligned_bytes;
	head->hdr_seq = volume->ondisk->vol0_next_seq++;

	tail = (void *)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = hdr_type;
	tail->tail_size = aligned_bytes;

	volume->ondisk->vol0_fifo_end += aligned_bytes;
	volume->cache.modified = 1;
	rel_volume(volume);

	return(off);
}
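
/*
 * Layout sketch (clarifying note, not in the original source): one FIFO
 * reservation as initialized by hammer_alloc_fifo():
 *
 *	|<-------------------- aligned_bytes -------------------->|
 *	+------------------+-------------------+------------------+
 *	| hammer_fifo_head | base + ext data   | hammer_fifo_tail |
 *	+------------------+-------------------+------------------+
 *	  hdr_size = aligned_bytes               tail_size = aligned_bytes
 */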
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;

	TAILQ_FOREACH(buffer, &volume->buffer_list, entry)
		flush_buffer(buffer);
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}
void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
}
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}
static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}
void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}