/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.11 2008/07/11 01:22:29 dillon Exp $
 */
/*
 * HAMMER mirroring ioctls - serialize and deserialize modifications made
 * to a HAMMER filesystem so they can be replayed on a mirroring target.
 */
43 static int hammer_mirror_check(hammer_cursor_t cursor
,
44 struct hammer_ioc_mrecord_rec
*mrec
);
45 static int hammer_mirror_update(hammer_cursor_t cursor
,
46 struct hammer_ioc_mrecord_rec
*mrec
);
47 static int hammer_mirror_write(hammer_cursor_t cursor
,
48 struct hammer_ioc_mrecord_rec
*mrec
,
50 static int hammer_ioc_mirror_write_rec(hammer_cursor_t cursor
,
51 struct hammer_ioc_mrecord_rec
*mrec
,
52 struct hammer_ioc_mirror_rw
*mirror
,
53 u_int32_t localization
,
55 static int hammer_ioc_mirror_write_pass(hammer_cursor_t cursor
,
56 struct hammer_ioc_mrecord_rec
*mrec
,
57 struct hammer_ioc_mirror_rw
*mirror
,
58 u_int32_t localization
);
59 static int hammer_ioc_mirror_write_skip(hammer_cursor_t cursor
,
60 struct hammer_ioc_mrecord_skip
*mrec
,
61 struct hammer_ioc_mirror_rw
*mirror
,
62 u_int32_t localization
);
63 static int hammer_mirror_delete_at_cursor(hammer_cursor_t cursor
,
64 struct hammer_ioc_mirror_rw
*mirror
);
65 static int hammer_mirror_localize_data(hammer_data_ondisk_t data
,
66 hammer_btree_leaf_elm_t leaf
);
69 * All B-Tree records within the specified key range which also conform
70 * to the transaction id range are returned. Mirroring code keeps track
71 * of the last transaction id fully scanned and can efficiently pick up
72 * where it left off if interrupted.
74 * The PFS is identified in the mirror structure. The passed ip is just
75 * some directory in the overall HAMMER filesystem and has nothing to
79 hammer_ioc_mirror_read(hammer_transaction_t trans
, hammer_inode_t ip
,
80 struct hammer_ioc_mirror_rw
*mirror
)
82 struct hammer_cmirror cmirror
;
83 struct hammer_cursor cursor
;
84 union hammer_ioc_mrecord_any mrec
;
85 hammer_btree_leaf_elm_t elm
;
86 const int crc_start
= HAMMER_MREC_CRCOFF
;
92 u_int32_t localization
;
95 localization
= (u_int32_t
)mirror
->pfs_id
<< 16;
97 if ((mirror
->key_beg
.localization
| mirror
->key_end
.localization
) &
98 HAMMER_LOCALIZE_PSEUDOFS_MASK
) {
101 if (hammer_btree_cmp(&mirror
->key_beg
, &mirror
->key_end
) > 0)
104 mirror
->key_cur
= mirror
->key_beg
;
105 mirror
->key_cur
.localization
&= HAMMER_LOCALIZE_MASK
;
106 mirror
->key_cur
.localization
+= localization
;
107 bzero(&mrec
, sizeof(mrec
));
108 bzero(&cmirror
, sizeof(cmirror
));
111 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
113 hammer_done_cursor(&cursor
);
116 cursor
.key_beg
= mirror
->key_cur
;
117 cursor
.key_end
= mirror
->key_end
;
118 cursor
.key_end
.localization
&= HAMMER_LOCALIZE_MASK
;
119 cursor
.key_end
.localization
+= localization
;
121 cursor
.flags
|= HAMMER_CURSOR_END_INCLUSIVE
;
122 cursor
.flags
|= HAMMER_CURSOR_BACKEND
;
125 * This flag filters the search to only return elements whos create
126 * or delete TID is >= mirror_tid. The B-Tree uses the mirror_tid
127 * field stored with internal and leaf nodes to shortcut the scan.
129 cursor
.flags
|= HAMMER_CURSOR_MIRROR_FILTERED
;
130 cursor
.cmirror
= &cmirror
;
131 cmirror
.mirror_tid
= mirror
->tid_beg
;
133 error
= hammer_btree_first(&cursor
);
136 * An internal node can be returned in mirror-filtered
137 * mode and indicates that the scan is returning a skip
138 * range in the cursor->cmirror structure.
140 uptr
= (char *)mirror
->ubuf
+ mirror
->count
;
141 if (cursor
.node
->ondisk
->type
== HAMMER_BTREE_TYPE_INTERNAL
) {
145 mirror
->key_cur
= cmirror
.skip_beg
;
146 bytes
= sizeof(mrec
.skip
);
147 if (mirror
->count
+ HAMMER_HEAD_DOALIGN(bytes
) >
155 mrec
.head
.signature
= HAMMER_IOC_MIRROR_SIGNATURE
;
156 mrec
.head
.type
= HAMMER_MREC_TYPE_SKIP
;
157 mrec
.head
.rec_size
= bytes
;
158 mrec
.skip
.skip_beg
= cmirror
.skip_beg
;
159 mrec
.skip
.skip_end
= cmirror
.skip_end
;
160 mrec
.head
.rec_crc
= crc32(&mrec
.head
.rec_size
,
162 error
= copyout(&mrec
, uptr
, bytes
);
168 * Leaf node. In full-history mode we could filter out
169 * elements modified outside the user-requested TID range.
171 * However, such elements must be returned so the writer
172 * can compare them against the target to detemrine what
173 * needs to be deleted on the target, particular for
174 * no-history mirrors.
176 KKASSERT(cursor
.node
->ondisk
->type
== HAMMER_BTREE_TYPE_LEAF
);
177 elm
= &cursor
.node
->ondisk
->elms
[cursor
.index
].leaf
;
178 mirror
->key_cur
= elm
->base
;
180 if ((elm
->base
.create_tid
< mirror
->tid_beg
||
181 elm
->base
.create_tid
> mirror
->tid_end
) &&
182 (elm
->base
.delete_tid
< mirror
->tid_beg
||
183 elm
->base
.delete_tid
> mirror
->tid_end
)) {
184 bytes
= sizeof(mrec
.rec
);
185 if (mirror
->count
+ HAMMER_HEAD_DOALIGN(bytes
) >
191 * Fill mrec. PASS records are records which are
192 * outside the TID range needed for the mirror
193 * update. They are sent without any data payload
194 * because the mirroring target must still compare
195 * records that fall outside the SKIP ranges to
196 * determine what might need to be deleted. Such
197 * deletions are needed if the master or files on
198 * the master are no-history, or if the slave is
199 * so far behind the master has already been pruned.
201 mrec
.head
.signature
= HAMMER_IOC_MIRROR_SIGNATURE
;
202 mrec
.head
.type
= HAMMER_MREC_TYPE_PASS
;
203 mrec
.head
.rec_size
= bytes
;
204 mrec
.rec
.leaf
= *elm
;
205 mrec
.head
.rec_crc
= crc32(&mrec
.head
.rec_size
,
207 error
= copyout(&mrec
, uptr
, bytes
);
214 * Yield to more important tasks
216 if ((error
= hammer_signal_check(trans
->hmp
)) != 0)
218 if (trans
->hmp
->sync_lock
.wanted
) {
219 tsleep(trans
, 0, "hmrslo", hz
/ 10);
221 if (trans
->hmp
->locked_dirty_space
+
222 trans
->hmp
->io_running_space
> hammer_limit_dirtybufspace
) {
223 hammer_flusher_async(trans
->hmp
);
224 tsleep(trans
, 0, "hmrslo", hz
/ 10);
228 * The core code exports the data to userland.
230 data_len
= (elm
->data_offset
) ? elm
->data_len
: 0;
232 error
= hammer_btree_extract(&cursor
,
233 HAMMER_CURSOR_GET_DATA
);
238 bytes
= sizeof(mrec
.rec
) + data_len
;
239 if (mirror
->count
+ HAMMER_HEAD_DOALIGN(bytes
) > mirror
->size
)
243 * Construct the record for userland and copyout.
245 * The user is asking for a snapshot, if the record was
246 * deleted beyond the user-requested ending tid, the record
247 * is not considered deleted from the point of view of
248 * userland and delete_tid is cleared.
250 mrec
.head
.signature
= HAMMER_IOC_MIRROR_SIGNATURE
;
251 mrec
.head
.type
= HAMMER_MREC_TYPE_REC
;
252 mrec
.head
.rec_size
= bytes
;
253 mrec
.rec
.leaf
= *elm
;
254 if (elm
->base
.delete_tid
>= mirror
->tid_end
)
255 mrec
.rec
.leaf
.base
.delete_tid
= 0;
256 rec_crc
= crc32(&mrec
.head
.rec_size
,
257 sizeof(mrec
.rec
) - crc_start
);
259 rec_crc
= crc32_ext(cursor
.data
, data_len
, rec_crc
);
260 mrec
.head
.rec_crc
= rec_crc
;
261 error
= copyout(&mrec
, uptr
, sizeof(mrec
.rec
));
262 if (data_len
&& error
== 0) {
263 error
= copyout(cursor
.data
, uptr
+ sizeof(mrec
.rec
),
269 * eatdisk controls whether we skip the current cursor
270 * position on the next scan or not. If doing a SKIP
271 * the cursor is already positioned properly for the next
272 * scan and eatdisk will be 0.
276 mirror
->count
+= HAMMER_HEAD_DOALIGN(bytes
);
278 cursor
.flags
|= HAMMER_CURSOR_ATEDISK
;
280 cursor
.flags
&= ~HAMMER_CURSOR_ATEDISK
;
281 error
= hammer_btree_iterate(&cursor
);
284 if (error
== ENOENT
) {
285 mirror
->key_cur
= mirror
->key_end
;
288 hammer_done_cursor(&cursor
);
289 if (error
== EDEADLK
)
291 if (error
== EINTR
) {
292 mirror
->head
.flags
|= HAMMER_IOC_HEAD_INTR
;
296 mirror
->key_cur
.localization
&= HAMMER_LOCALIZE_MASK
;
301 * Copy records from userland to the target mirror.
303 * The PFS is identified in the mirror structure. The passed ip is just
304 * some directory in the overall HAMMER filesystem and has nothing to
305 * do with the PFS. In fact, there might not even be a root directory for
309 hammer_ioc_mirror_write(hammer_transaction_t trans
, hammer_inode_t ip
,
310 struct hammer_ioc_mirror_rw
*mirror
)
312 union hammer_ioc_mrecord_any mrec
;
313 struct hammer_cursor cursor
;
314 u_int32_t localization
;
319 localization
= (u_int32_t
)mirror
->pfs_id
<< 16;
322 * Validate the mirror structure and relocalize the tracking keys.
324 if (mirror
->size
< 0 || mirror
->size
> 0x70000000)
326 mirror
->key_beg
.localization
&= HAMMER_LOCALIZE_MASK
;
327 mirror
->key_beg
.localization
+= localization
;
328 mirror
->key_end
.localization
&= HAMMER_LOCALIZE_MASK
;
329 mirror
->key_end
.localization
+= localization
;
330 mirror
->key_cur
.localization
&= HAMMER_LOCALIZE_MASK
;
331 mirror
->key_cur
.localization
+= localization
;
334 * Set up our tracking cursor for the loop. The tracking cursor
335 * is used to delete records that are no longer present on the
336 * master. The last handled record at key_cur must be skipped.
338 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
340 cursor
.key_beg
= mirror
->key_cur
;
341 cursor
.key_end
= mirror
->key_end
;
342 cursor
.flags
|= HAMMER_CURSOR_BACKEND
;
343 error
= hammer_btree_first(&cursor
);
345 cursor
.flags
|= HAMMER_CURSOR_ATEDISK
;
350 * Loop until our input buffer has been exhausted.
353 mirror
->count
+ sizeof(mrec
.head
) <= mirror
->size
) {
356 * Acquire and validate header
358 if ((bytes
= mirror
->size
- mirror
->count
) > sizeof(mrec
))
359 bytes
= sizeof(mrec
);
360 uptr
= (char *)mirror
->ubuf
+ mirror
->count
;
361 error
= copyin(uptr
, &mrec
, bytes
);
364 if (mrec
.head
.signature
!= HAMMER_IOC_MIRROR_SIGNATURE
) {
368 if (mrec
.head
.rec_size
< sizeof(mrec
.head
) ||
369 mrec
.head
.rec_size
> sizeof(mrec
) + HAMMER_XBUFSIZE
||
370 mirror
->count
+ mrec
.head
.rec_size
> mirror
->size
) {
375 switch(mrec
.head
.type
) {
376 case HAMMER_MREC_TYPE_SKIP
:
377 if (mrec
.head
.rec_size
!= sizeof(mrec
.skip
))
380 error
= hammer_ioc_mirror_write_skip(&cursor
, &mrec
.skip
, mirror
, localization
);
382 case HAMMER_MREC_TYPE_REC
:
383 if (mrec
.head
.rec_size
< sizeof(mrec
.rec
))
386 error
= hammer_ioc_mirror_write_rec(&cursor
, &mrec
.rec
, mirror
, localization
, uptr
+ sizeof(mrec
.rec
));
388 case HAMMER_MREC_TYPE_PASS
:
389 if (mrec
.head
.rec_size
!= sizeof(mrec
.rec
))
392 error
= hammer_ioc_mirror_write_pass(&cursor
, &mrec
.rec
, mirror
, localization
);
400 * Retry the current record on deadlock, otherwise setup
403 if (error
== EDEADLK
) {
404 while (error
== EDEADLK
) {
405 hammer_recover_cursor(&cursor
);
406 error
= hammer_cursor_upgrade(&cursor
);
409 if (error
== EALREADY
)
413 HAMMER_HEAD_DOALIGN(mrec
.head
.rec_size
);
417 hammer_done_cursor(&cursor
);
423 mirror
->head
.flags
|= HAMMER_IOC_HEAD_ERROR
;
424 mirror
->head
.error
= error
;
428 * ioctls don't update the RW data structure if an error is returned,
435 * Handle skip records.
437 * We must iterate from the last resolved record position at mirror->key_cur
438 * to skip_beg and delete any records encountered.
440 * mirror->key_cur must be carefully set when we succeed in processing
444 hammer_ioc_mirror_write_skip(hammer_cursor_t cursor
,
445 struct hammer_ioc_mrecord_skip
*mrec
,
446 struct hammer_ioc_mirror_rw
*mirror
,
447 u_int32_t localization
)
452 * Relocalize the skip range
454 mrec
->skip_beg
.localization
&= HAMMER_LOCALIZE_MASK
;
455 mrec
->skip_beg
.localization
+= localization
;
456 mrec
->skip_end
.localization
&= HAMMER_LOCALIZE_MASK
;
457 mrec
->skip_end
.localization
+= localization
;
460 * Iterate from current position to skip_beg, deleting any records
463 cursor
->key_end
= mrec
->skip_beg
;
464 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
466 error
= hammer_btree_iterate(cursor
);
468 error
= hammer_mirror_delete_at_cursor(cursor
, mirror
);
470 error
= hammer_btree_iterate(cursor
);
474 * ENOENT just means we hit the end of our iteration.
480 * Now skip past the skip (which is the whole point point of
481 * having a skip record). The sender has not sent us any records
482 * for the skip area so we wouldn't know what to keep and what
485 * Clear ATEDISK because skip_end is non-inclusive, so we can't
486 * count an exact match if we happened to get one.
489 mirror
->key_cur
= mrec
->skip_end
;
490 cursor
->key_beg
= mrec
->skip_end
;
491 error
= hammer_btree_lookup(cursor
);
492 cursor
->flags
&= ~HAMMER_CURSOR_ATEDISK
;
500 * Handle B-Tree records.
502 * We must iterate to mrec->base.key (non-inclusively), and then process
503 * the record. We are allowed to write a new record or delete an existing
504 * record, but cannot replace an existing record.
506 * mirror->key_cur must be carefully set when we succeed in processing
510 hammer_ioc_mirror_write_rec(hammer_cursor_t cursor
,
511 struct hammer_ioc_mrecord_rec
*mrec
,
512 struct hammer_ioc_mirror_rw
*mirror
,
513 u_int32_t localization
,
516 hammer_transaction_t trans
;
520 trans
= cursor
->trans
;
521 rec_crc
= crc32(mrec
, sizeof(*mrec
));
523 if (mrec
->leaf
.data_len
< 0 ||
524 mrec
->leaf
.data_len
> HAMMER_XBUFSIZE
||
525 mrec
->leaf
.data_len
+ sizeof(*mrec
) > mrec
->head
.rec_size
) {
530 * Re-localize for target. relocalization of data is handled
531 * by hammer_mirror_write().
533 mrec
->leaf
.base
.localization
&= HAMMER_LOCALIZE_MASK
;
534 mrec
->leaf
.base
.localization
+= localization
;
537 * Delete records through until we reach (non-inclusively) the
540 cursor
->key_end
= mrec
->leaf
.base
;
541 cursor
->flags
&= ~HAMMER_CURSOR_END_INCLUSIVE
;
542 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
544 error
= hammer_btree_iterate(cursor
);
546 error
= hammer_mirror_delete_at_cursor(cursor
, mirror
);
548 error
= hammer_btree_iterate(cursor
);
556 * If the record exists only the delete_tid may be updated.
558 * If the record does not exist we create it. For now we
559 * ignore records with a non-zero delete_tid. Note that
560 * mirror operations are effective an as-of operation and
561 * delete_tid can be 0 for mirroring purposes even if it is
562 * not actually 0 at the originator.
564 * These functions can return EDEADLK
566 cursor
->key_beg
= mrec
->leaf
.base
;
567 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
568 cursor
->flags
&= ~HAMMER_CURSOR_INSERT
;
569 error
= hammer_btree_lookup(cursor
);
571 if (error
== 0 && hammer_mirror_check(cursor
, mrec
)) {
572 error
= hammer_mirror_update(cursor
, mrec
);
573 } else if (error
== ENOENT
&& mrec
->leaf
.base
.delete_tid
== 0) {
574 error
= hammer_mirror_write(cursor
, mrec
, uptr
);
575 } else if (error
== ENOENT
) {
578 if (error
== 0 || error
== EALREADY
)
579 mirror
->key_cur
= mrec
->leaf
.base
;
584 * This works like write_rec but no write or update is necessary,
585 * and no data payload is included so we couldn't do a write even
588 * We must still iterate for deletions, and we can validate the
589 * record header which is a good way to test for corrupted mirror
592 * mirror->key_cur must be carefully set when we succeed in processing
597 hammer_ioc_mirror_write_pass(hammer_cursor_t cursor
,
598 struct hammer_ioc_mrecord_rec
*mrec
,
599 struct hammer_ioc_mirror_rw
*mirror
,
600 u_int32_t localization
)
602 hammer_transaction_t trans
;
606 trans
= cursor
->trans
;
607 rec_crc
= crc32(mrec
, sizeof(*mrec
));
610 * Re-localize for target. Relocalization of data is handled
611 * by hammer_mirror_write().
613 mrec
->leaf
.base
.localization
&= HAMMER_LOCALIZE_MASK
;
614 mrec
->leaf
.base
.localization
+= localization
;
617 * Delete records through until we reach (non-inclusively) the
620 cursor
->key_end
= mrec
->leaf
.base
;
621 cursor
->flags
&= ~HAMMER_CURSOR_END_INCLUSIVE
;
622 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
624 error
= hammer_btree_iterate(cursor
);
626 error
= hammer_mirror_delete_at_cursor(cursor
, mirror
);
628 error
= hammer_btree_iterate(cursor
);
634 * Locate the record and get past it by setting ATEDISK.
637 mirror
->key_cur
= mrec
->leaf
.base
;
638 cursor
->key_beg
= mrec
->leaf
.base
;
639 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
640 cursor
->flags
&= ~HAMMER_CURSOR_INSERT
;
641 error
= hammer_btree_lookup(cursor
);
643 cursor
->flags
|= HAMMER_CURSOR_ATEDISK
;
645 cursor
->flags
&= ~HAMMER_CURSOR_ATEDISK
;
653 * As part of the mirror write we iterate across swaths of records
654 * on the target which no longer exist on the source, and mark them
659 hammer_mirror_delete_at_cursor(hammer_cursor_t cursor
,
660 struct hammer_ioc_mirror_rw
*mirror
)
662 hammer_transaction_t trans
;
663 hammer_btree_elm_t elm
;
666 if ((error
= hammer_cursor_upgrade(cursor
)) != 0)
669 elm
= &cursor
->node
->ondisk
->elms
[cursor
->index
];
670 KKASSERT(elm
->leaf
.base
.btype
== HAMMER_BTREE_TYPE_RECORD
);
672 kprintf("mirror_delete %016llx %016llx\n", elm
->leaf
.base
.obj_id
, elm
->leaf
.base
.key
);
674 trans
= cursor
->trans
;
675 hammer_sync_lock_sh(trans
);
677 if (elm
->leaf
.base
.delete_tid
== 0) {
679 * We don't know when the originator deleted the element
680 * because it was destroyed, tid_end works.
682 KKASSERT(elm
->base
.create_tid
< mirror
->tid_end
);
683 hammer_modify_node(trans
, cursor
->node
, elm
, sizeof(*elm
));
684 elm
->base
.delete_tid
= mirror
->tid_end
;
685 elm
->leaf
.delete_ts
= time_second
;
686 hammer_modify_node_done(cursor
->node
);
689 * Track a count of active inodes.
691 if (elm
->base
.obj_type
== HAMMER_RECTYPE_INODE
) {
692 hammer_modify_volume_field(trans
,
695 --trans
->hmp
->rootvol
->ondisk
->vol0_stat_inodes
;
696 hammer_modify_volume_done(trans
->rootvol
);
699 hammer_sync_unlock(trans
);
701 cursor
->flags
|= HAMMER_CURSOR_ATEDISK
;
707 * Check whether an update is needed in the case where a match already
708 * exists on the target. The only type of update allowed in this case
709 * is an update of the delete_tid.
711 * Return non-zero if the update should proceed.
715 hammer_mirror_check(hammer_cursor_t cursor
, struct hammer_ioc_mrecord_rec
*mrec
)
717 hammer_btree_leaf_elm_t leaf
= cursor
->leaf
;
719 if (leaf
->base
.delete_tid
!= mrec
->leaf
.base
.delete_tid
) {
720 if (mrec
->leaf
.base
.delete_tid
!= 0)
727 * Update a record in-place. Only the delete_tid can change.
731 hammer_mirror_update(hammer_cursor_t cursor
,
732 struct hammer_ioc_mrecord_rec
*mrec
)
734 hammer_transaction_t trans
;
735 hammer_btree_leaf_elm_t elm
;
738 if ((error
= hammer_cursor_upgrade(cursor
)) != 0)
742 trans
= cursor
->trans
;
744 if (mrec
->leaf
.base
.delete_tid
== 0) {
745 kprintf("mirror_write: object %016llx:%016llx deleted on "
746 "target, not deleted on source\n",
747 elm
->base
.obj_id
, elm
->base
.key
);
750 hammer_sync_lock_sh(trans
);
752 KKASSERT(elm
->base
.create_tid
< mrec
->leaf
.base
.delete_tid
);
753 hammer_modify_node(trans
, cursor
->node
, elm
, sizeof(*elm
));
754 elm
->base
.delete_tid
= mrec
->leaf
.base
.delete_tid
;
755 elm
->delete_ts
= mrec
->leaf
.delete_ts
;
756 hammer_modify_node_done(cursor
->node
);
759 * Cursor is left on the current element, we want to skip it now.
761 cursor
->flags
|= HAMMER_CURSOR_ATEDISK
;
764 * Track a count of active inodes.
766 if (elm
->base
.obj_type
== HAMMER_RECTYPE_INODE
) {
767 hammer_modify_volume_field(trans
,
770 --trans
->hmp
->rootvol
->ondisk
->vol0_stat_inodes
;
771 hammer_modify_volume_done(trans
->rootvol
);
773 hammer_sync_unlock(trans
);
779 * Write out a new record.
783 hammer_mirror_write(hammer_cursor_t cursor
,
784 struct hammer_ioc_mrecord_rec
*mrec
,
787 hammer_transaction_t trans
;
788 hammer_buffer_t data_buffer
;
789 hammer_off_t ndata_offset
;
790 hammer_tid_t high_tid
;
795 trans
= cursor
->trans
;
799 * Get the sync lock so the whole mess is atomic
801 hammer_sync_lock_sh(trans
);
804 * Allocate and adjust data
806 if (mrec
->leaf
.data_len
&& mrec
->leaf
.data_offset
) {
807 ndata
= hammer_alloc_data(trans
, mrec
->leaf
.data_len
,
808 mrec
->leaf
.base
.rec_type
,
809 &ndata_offset
, &data_buffer
, &error
);
812 mrec
->leaf
.data_offset
= ndata_offset
;
813 hammer_modify_buffer(trans
, data_buffer
, NULL
, 0);
814 error
= copyin(udata
, ndata
, mrec
->leaf
.data_len
);
816 if (hammer_crc_test_leaf(ndata
, &mrec
->leaf
) == 0) {
817 kprintf("data crc mismatch on pipe\n");
820 error
= hammer_mirror_localize_data(
824 hammer_modify_buffer_done(data_buffer
);
826 mrec
->leaf
.data_offset
= 0;
834 * Do the insertion. This can fail with a EDEADLK or EALREADY
836 cursor
->flags
|= HAMMER_CURSOR_INSERT
;
837 error
= hammer_btree_lookup(cursor
);
838 if (error
!= ENOENT
) {
844 error
= hammer_btree_insert(cursor
, &mrec
->leaf
, &doprop
);
847 * Cursor is left on the current element, we want to skip it now.
849 cursor
->flags
|= HAMMER_CURSOR_ATEDISK
;
850 cursor
->flags
&= ~HAMMER_CURSOR_INSERT
;
853 * Track a count of active inodes.
855 if (error
== 0 && mrec
->leaf
.base
.delete_tid
== 0 &&
856 mrec
->leaf
.base
.obj_type
== HAMMER_RECTYPE_INODE
) {
857 hammer_modify_volume_field(trans
,
860 ++trans
->hmp
->rootvol
->ondisk
->vol0_stat_inodes
;
861 hammer_modify_volume_done(trans
->rootvol
);
865 * vol0_next_tid must track the highest TID stored in the filesystem.
866 * We do not need to generate undo for this update.
868 high_tid
= mrec
->leaf
.base
.create_tid
;
869 if (high_tid
< mrec
->leaf
.base
.delete_tid
)
870 high_tid
= mrec
->leaf
.base
.delete_tid
;
871 if (trans
->rootvol
->ondisk
->vol0_next_tid
< high_tid
) {
872 hammer_modify_volume(trans
, trans
->rootvol
, NULL
, 0);
873 trans
->rootvol
->ondisk
->vol0_next_tid
= high_tid
;
874 hammer_modify_volume_done(trans
->rootvol
);
877 if (error
== 0 && doprop
)
878 hammer_btree_do_propagation(cursor
, NULL
, &mrec
->leaf
);
884 if (error
&& mrec
->leaf
.data_offset
) {
885 hammer_blockmap_free(cursor
->trans
,
886 mrec
->leaf
.data_offset
,
887 mrec
->leaf
.data_len
);
889 hammer_sync_unlock(trans
);
891 hammer_rel_buffer(data_buffer
, 0);
896 * Localize the data payload. Directory entries may need their
897 * localization adjusted.
899 * PFS directory entries must be skipped entirely (return EALREADY).
903 hammer_mirror_localize_data(hammer_data_ondisk_t data
,
904 hammer_btree_leaf_elm_t leaf
)
906 u_int32_t localization
;
908 if (leaf
->base
.rec_type
== HAMMER_RECTYPE_DIRENTRY
) {
909 if (data
->entry
.obj_id
== HAMMER_OBJID_ROOT
)
911 localization
= leaf
->base
.localization
&
912 HAMMER_LOCALIZE_PSEUDOFS_MASK
;
913 if (data
->entry
.localization
!= localization
) {
914 data
->entry
.localization
= localization
;
915 hammer_crc_set_leaf(data
, leaf
);
922 * Auto-detect the pseudofs.
926 hammer_mirror_autodetect(struct hammer_ioc_pseudofs_rw
*pfs
, hammer_inode_t ip
)
928 if (pfs
->pfs_id
== -1)
929 pfs
->pfs_id
= (int)(ip
->obj_localization
>> 16);
933 * Get mirroring/pseudo-fs information
936 hammer_ioc_get_pseudofs(hammer_transaction_t trans
, hammer_inode_t ip
,
937 struct hammer_ioc_pseudofs_rw
*pfs
)
939 hammer_pseudofs_inmem_t pfsm
;
940 u_int32_t localization
;
943 hammer_mirror_autodetect(pfs
, ip
);
944 if (pfs
->pfs_id
< 0 || pfs
->pfs_id
>= HAMMER_MAX_PFS
)
946 localization
= (u_int32_t
)pfs
->pfs_id
<< 16;
947 pfs
->bytes
= sizeof(struct hammer_pseudofs_data
);
948 pfs
->version
= HAMMER_IOC_PSEUDOFS_VERSION
;
950 pfsm
= hammer_load_pseudofs(trans
, localization
, &error
);
952 hammer_rel_pseudofs(trans
->hmp
, pfsm
);
957 * If the PFS is a master the sync tid is set by normal operation
958 * rather then the mirroring code, and will always track the
959 * real HAMMER filesystem.
961 if (pfsm
->pfsd
.master_id
>= 0)
962 pfsm
->pfsd
.sync_end_tid
= trans
->rootvol
->ondisk
->vol0_next_tid
;
965 * Copy out to userland.
968 if (pfs
->ondisk
&& error
== 0)
969 error
= copyout(&pfsm
->pfsd
, pfs
->ondisk
, sizeof(pfsm
->pfsd
));
970 hammer_rel_pseudofs(trans
->hmp
, pfsm
);
975 * Set mirroring/pseudo-fs information
978 hammer_ioc_set_pseudofs(hammer_transaction_t trans
, hammer_inode_t ip
,
979 struct ucred
*cred
, struct hammer_ioc_pseudofs_rw
*pfs
)
981 hammer_pseudofs_inmem_t pfsm
;
983 u_int32_t localization
;
986 hammer_mirror_autodetect(pfs
, ip
);
987 if (pfs
->pfs_id
< 0 || pfs
->pfs_id
>= HAMMER_MAX_PFS
)
989 if (pfs
->bytes
!= sizeof(pfsm
->pfsd
))
991 if (pfs
->version
!= HAMMER_IOC_PSEUDOFS_VERSION
)
993 if (error
== 0 && pfs
->ondisk
) {
995 * Load the PFS so we can modify our in-core copy.
997 localization
= (u_int32_t
)pfs
->pfs_id
<< 16;
998 pfsm
= hammer_load_pseudofs(trans
, localization
, &error
);
999 error
= copyin(pfs
->ondisk
, &pfsm
->pfsd
, sizeof(pfsm
->pfsd
));
1002 * Save it back, create a root inode if we are in master
1003 * mode and no root exists.
1006 error
= hammer_mkroot_pseudofs(trans
, cred
, pfsm
);
1008 error
= hammer_save_pseudofs(trans
, pfsm
);
1009 hammer_rel_pseudofs(trans
->hmp
, pfsm
);