HAMMER - Fix root inode creation for slave.
sys/vfs/hammer/hammer_mirror.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.17 2008/07/31 22:30:33 dillon Exp $
 */
/*
 * HAMMER mirroring ioctls - serialize and deserialize modifications made
 * to a filesystem.
 */

#include "hammer.h"
static int hammer_mirror_check(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_rec *mrec);
static int hammer_mirror_update(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_rec *mrec);
static int hammer_mirror_write(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_rec *mrec,
                char *udata);
static int hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_rec *mrec,
                struct hammer_ioc_mirror_rw *mirror,
                u_int32_t localization,
                char *uptr);
static int hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_rec *mrec,
                struct hammer_ioc_mirror_rw *mirror,
                u_int32_t localization);
static int hammer_ioc_mirror_write_skip(hammer_cursor_t cursor,
                struct hammer_ioc_mrecord_skip *mrec,
                struct hammer_ioc_mirror_rw *mirror,
                u_int32_t localization);
static int hammer_mirror_delete_to(hammer_cursor_t cursor,
                struct hammer_ioc_mirror_rw *mirror);
static int hammer_mirror_localize_data(hammer_data_ondisk_t data,
                hammer_btree_leaf_elm_t leaf);
/*
 * All B-Tree records within the specified key range which also conform
 * to the transaction id range are returned.  Mirroring code keeps track
 * of the last transaction id fully scanned and can efficiently pick up
 * where it left off if interrupted.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.
 */
int
hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
                       struct hammer_ioc_mirror_rw *mirror)
{
        struct hammer_cmirror cmirror;
        struct hammer_cursor cursor;
        union hammer_ioc_mrecord_any mrec;
        hammer_btree_leaf_elm_t elm;
        const int crc_start = HAMMER_MREC_CRCOFF;
        char *uptr;
        int error;
        int data_len;
        int bytes;
        int eatdisk;
        int mrec_flags;
        u_int32_t localization;
        u_int32_t rec_crc;

        localization = (u_int32_t)mirror->pfs_id << 16;

        if ((mirror->key_beg.localization | mirror->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
                return(EINVAL);

        mirror->key_cur = mirror->key_beg;
        mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        mirror->key_cur.localization += localization;
        bzero(&mrec, sizeof(mrec));
        bzero(&cmirror, sizeof(cmirror));

        /*
         * Make CRC errors non-fatal (at least on data), causing an EDOM
         * error instead of EIO.
         */
        trans->flags |= HAMMER_TRANSF_CRCDOM;
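
        /*
         * On an EDEADLK the cursor is torn down and the scan restarts
         * here, resuming from the key_cur saved by the previous pass.
         */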
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
        cursor.key_beg = mirror->key_cur;
        cursor.key_end = mirror->key_end;
        cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
        cursor.key_end.localization += localization;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;

        /*
         * This flag filters the search to only return elements whose create
         * or delete TID is >= mirror_tid.  The B-Tree uses the mirror_tid
         * field stored with internal and leaf nodes to shortcut the scan.
         */
        cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
        cursor.cmirror = &cmirror;
        cmirror.mirror_tid = mirror->tid_beg;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Yield to more important tasks
                 */
                if (error == 0) {
                        error = hammer_signal_check(trans->hmp);
                        if (error)
                                break;
                }

                /*
                 * An internal node can be returned in mirror-filtered
                 * mode and indicates that the scan is returning a skip
                 * range in the cursor->cmirror structure.
                 */
                uptr = (char *)mirror->ubuf + mirror->count;
                if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
                        /*
                         * Check space
                         */
                        mirror->key_cur = cmirror.skip_beg;
                        bytes = sizeof(mrec.skip);
                        if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
                            mirror->size) {
                                break;
                        }

                        /*
                         * Fill mrec
                         */
                        mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
                        mrec.head.type = HAMMER_MREC_TYPE_SKIP;
                        mrec.head.rec_size = bytes;
                        mrec.skip.skip_beg = cmirror.skip_beg;
                        mrec.skip.skip_end = cmirror.skip_end;
                        mrec.head.rec_crc = crc32(&mrec.head.rec_size,
                                                  bytes - crc_start);
                        error = copyout(&mrec, uptr, bytes);
                        eatdisk = 0;
                        goto didwrite;
                }

                /*
                 * Leaf node.  In full-history mode we could filter out
                 * elements modified outside the user-requested TID range.
                 *
                 * However, such elements must be returned so the writer
                 * can compare them against the target to determine what
                 * needs to be deleted on the target, particularly for
                 * no-history mirrors.
                 */
                KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
                elm = &cursor.node->ondisk->elms[cursor.index].leaf;
                mirror->key_cur = elm->base;

                /*
                 * If the record was created after our end point we just
                 * ignore it.
                 */
                if (elm->base.create_tid > mirror->tid_end) {
                        error = 0;
                        bytes = 0;
                        eatdisk = 1;
                        goto didwrite;
                }

                /*
                 * Determine if we should generate a PASS or a REC.  PASS
                 * records are records without any data payload.  Such
                 * records will be generated if the target is already expected
                 * to have the record, allowing it to delete the gaps.
                 *
                 * A PASS record is also used to perform deletions on the
                 * target.
                 *
                 * Such deletions are needed if the master or files on the
                 * master are no-history, or if the slave is so far behind
                 * that the master has already been pruned.
                 */
                if (elm->base.create_tid < mirror->tid_beg) {
                        bytes = sizeof(mrec.rec);
                        if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
                            mirror->size) {
                                break;
                        }

                        /*
                         * Fill mrec.
                         */
                        mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
                        mrec.head.type = HAMMER_MREC_TYPE_PASS;
                        mrec.head.rec_size = bytes;
                        mrec.rec.leaf = *elm;
                        mrec.head.rec_crc = crc32(&mrec.head.rec_size,
                                                  bytes - crc_start);
                        error = copyout(&mrec, uptr, bytes);
                        eatdisk = 1;
                        goto didwrite;
                }

                /*
                 * The core code exports the data to userland.
                 *
                 * CRC errors on data are reported but passed through;
                 * the data must be washed by the user program.
                 */
                mrec_flags = 0;
                data_len = (elm->data_offset) ? elm->data_len : 0;
                if (data_len) {
                        error = hammer_btree_extract(&cursor,
                                                     HAMMER_CURSOR_GET_DATA);
                        if (error) {
                                if (error != EDOM)
                                        break;
                                mrec_flags |= HAMMER_MRECF_CRC_ERROR |
                                              HAMMER_MRECF_DATA_CRC_BAD;
                        }
                }

                bytes = sizeof(mrec.rec) + data_len;
                if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) > mirror->size)
                        break;

                /*
                 * Construct the record for userland and copyout.
                 *
                 * The user is asking for a snapshot; if the record was
                 * deleted beyond the user-requested ending tid, the record
                 * is not considered deleted from the point of view of
                 * userland and delete_tid is cleared.
                 */
                mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
                mrec.head.type = HAMMER_MREC_TYPE_REC | mrec_flags;
                mrec.head.rec_size = bytes;
                mrec.rec.leaf = *elm;

                if (elm->base.delete_tid > mirror->tid_end)
                        mrec.rec.leaf.base.delete_tid = 0;
                rec_crc = crc32(&mrec.head.rec_size,
                                sizeof(mrec.rec) - crc_start);
                if (data_len)
                        rec_crc = crc32_ext(cursor.data, data_len, rec_crc);
                mrec.head.rec_crc = rec_crc;
                error = copyout(&mrec, uptr, sizeof(mrec.rec));
                if (data_len && error == 0) {
                        error = copyout(cursor.data, uptr + sizeof(mrec.rec),
                                        data_len);
                }
                eatdisk = 1;

                /*
                 * eatdisk controls whether we skip the current cursor
                 * position on the next scan or not.  If doing a SKIP
                 * the cursor is already positioned properly for the next
                 * scan and eatdisk will be 0.
                 */
didwrite:
                if (error == 0) {
                        mirror->count += HAMMER_HEAD_DOALIGN(bytes);
                        if (eatdisk)
                                cursor.flags |= HAMMER_CURSOR_ATEDISK;
                        else
                                cursor.flags &= ~HAMMER_CURSOR_ATEDISK;
                        error = hammer_btree_iterate(&cursor);
                }
        }
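
        /*
         * ENOENT from the iteration simply means the scan ran off the end
         * of the requested key range; report the entire range as consumed
         * and return success.
         */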
        if (error == ENOENT) {
                mirror->key_cur = mirror->key_end;
                error = 0;
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Copy records from userland to the target mirror.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.  In fact, there might not even be a root directory for
 * the PFS yet!
 */
int
hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_mirror_rw *mirror)
{
        union hammer_ioc_mrecord_any mrec;
        struct hammer_cursor cursor;
        u_int32_t localization;
        int checkspace_count = 0;
        int error;
        int bytes;
        char *uptr;
        int seq;

        localization = (u_int32_t)mirror->pfs_id << 16;
        seq = trans->hmp->flusher.act;
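
        /*
         * The flusher sequence number captured above is used below to
         * throttle this loop against the flusher when meta-data buffers
         * or UNDO space run low.
         */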

        /*
         * Validate the mirror structure and relocalize the tracking keys.
         */
        if (mirror->size < 0 || mirror->size > 0x70000000)
                return(EINVAL);
        mirror->key_beg.localization &= HAMMER_LOCALIZE_MASK;
        mirror->key_beg.localization += localization;
        mirror->key_end.localization &= HAMMER_LOCALIZE_MASK;
        mirror->key_end.localization += localization;
        mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        mirror->key_cur.localization += localization;

        /*
         * Set up our tracking cursor for the loop.  The tracking cursor
         * is used to delete records that are no longer present on the
         * master.  The last handled record at key_cur must be skipped.
         */
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);

        cursor.key_beg = mirror->key_cur;
        cursor.key_end = mirror->key_end;
        cursor.flags |= HAMMER_CURSOR_BACKEND;
        error = hammer_btree_first(&cursor);
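        /*
         * ENOENT here is not an error: a freshly created slave PFS may
         * have no records at all, and the loop below will populate it
         * from the incoming stream.
         */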
        if (error == 0)
                cursor.flags |= HAMMER_CURSOR_ATEDISK;
        if (error == ENOENT)
                error = 0;

        /*
         * Loop until our input buffer has been exhausted.
         */
        while (error == 0 &&
               mirror->count + sizeof(mrec.head) <= mirror->size) {

                /*
                 * Don't blow out the buffer cache.  Leave room for frontend
                 * cache as well.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async_one(trans->hmp);
                }

                /*
                 * If there is insufficient free space it may be due to
                 * reserved bigblocks, which flushing might fix.
                 */
                if (hammer_checkspace(trans->hmp, HAMMER_CHKSPC_MIRROR)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                }

                /*
                 * Acquire and validate header
                 */
                if ((bytes = mirror->size - mirror->count) > sizeof(mrec))
                        bytes = sizeof(mrec);
                uptr = (char *)mirror->ubuf + mirror->count;
                error = copyin(uptr, &mrec, bytes);
                if (error)
                        break;
                if (mrec.head.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
                        error = EINVAL;
                        break;
                }
                if (mrec.head.rec_size < sizeof(mrec.head) ||
                    mrec.head.rec_size > sizeof(mrec) + HAMMER_XBUFSIZE ||
                    mirror->count + mrec.head.rec_size > mirror->size) {
                        error = EINVAL;
                        break;
                }
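
                /*
                 * Dispatch on the record type.  Each handler returns 0 on
                 * success, EDEADLK to retry the same record, or EALREADY
                 * if the record already exists on the target; EALREADY is
                 * treated as success below.
                 */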
                switch(mrec.head.type & HAMMER_MRECF_TYPE_MASK) {
                case HAMMER_MREC_TYPE_SKIP:
                        if (mrec.head.rec_size != sizeof(mrec.skip))
                                error = EINVAL;
                        if (error == 0)
                                error = hammer_ioc_mirror_write_skip(&cursor, &mrec.skip, mirror, localization);
                        break;
                case HAMMER_MREC_TYPE_REC:
                        if (mrec.head.rec_size < sizeof(mrec.rec))
                                error = EINVAL;
                        if (error == 0)
                                error = hammer_ioc_mirror_write_rec(&cursor, &mrec.rec, mirror, localization, uptr + sizeof(mrec.rec));
                        break;
                case HAMMER_MREC_TYPE_REC_BADCRC:
                        /*
                         * Records with bad data payloads are ignored XXX.
                         */
                        if (mrec.head.rec_size < sizeof(mrec.rec))
                                error = EINVAL;
                        break;
                case HAMMER_MREC_TYPE_PASS:
                        if (mrec.head.rec_size != sizeof(mrec.rec))
                                error = EINVAL;
                        if (error == 0)
                                error = hammer_ioc_mirror_write_pass(&cursor, &mrec.rec, mirror, localization);
                        break;
                default:
                        error = EINVAL;
                        break;
                }

                /*
                 * Retry the current record on deadlock, otherwise set up
                 * for the next loop.
                 */
                if (error == EDEADLK) {
                        while (error == EDEADLK) {
                                hammer_recover_cursor(&cursor);
                                error = hammer_cursor_upgrade(&cursor);
                        }
                } else {
                        if (error == EALREADY)
                                error = 0;
                        if (error == 0) {
                                mirror->count +=
                                        HAMMER_HEAD_DOALIGN(mrec.head.rec_size);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        /*
         * Cumulative error.
         */
        if (error) {
                mirror->head.flags |= HAMMER_IOC_HEAD_ERROR;
                mirror->head.error = error;
        }

        /*
         * ioctls don't update the RW data structure if an error is returned,
         * so always return 0.
         */
        return(0);
}

/*
 * Handle skip records.
 *
 * We must iterate from the last resolved record position at mirror->key_cur
 * to skip_beg non-inclusive and delete any records encountered.
 *
 * mirror->key_cur must be carefully set when we succeed in processing
 * this mrec.
 */
static int
hammer_ioc_mirror_write_skip(hammer_cursor_t cursor,
                             struct hammer_ioc_mrecord_skip *mrec,
                             struct hammer_ioc_mirror_rw *mirror,
                             u_int32_t localization)
{
        int error;

        /*
         * Relocalize the skip range
         */
        mrec->skip_beg.localization &= HAMMER_LOCALIZE_MASK;
        mrec->skip_beg.localization += localization;
        mrec->skip_end.localization &= HAMMER_LOCALIZE_MASK;
        mrec->skip_end.localization += localization;

        /*
         * Iterate from current position to skip_beg, deleting any records
         * we encounter.  The record at skip_beg is not included (it is
         * skipped).
         */
        cursor->key_end = mrec->skip_beg;
        cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        error = hammer_mirror_delete_to(cursor, mirror);

        /*
         * Now skip past the skip (which is the whole point of having a
         * skip record).  The sender has not sent us any records for the
         * skip area so we wouldn't know what to keep and what to delete
         * anyway.
         *
         * Clear ATEDISK because skip_end is non-inclusive, so we can't
         * count an exact match if we happened to get one.
         */
        if (error == 0) {
                mirror->key_cur = mrec->skip_end;
                cursor->key_beg = mrec->skip_end;
                error = hammer_btree_lookup(cursor);
                cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                if (error == ENOENT)
                        error = 0;
        }
        return(error);
}

/*
 * Handle B-Tree records.
 *
 * We must iterate to mrec->base.key (non-inclusively), and then process
 * the record.  We are allowed to write a new record or delete an existing
 * record, but cannot replace an existing record.
 *
 * mirror->key_cur must be carefully set when we succeed in processing
 * this mrec.
 */
static int
hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
                            struct hammer_ioc_mrecord_rec *mrec,
                            struct hammer_ioc_mirror_rw *mirror,
                            u_int32_t localization,
                            char *uptr)
{
        hammer_transaction_t trans;
        u_int32_t rec_crc;
        int error;

        trans = cursor->trans;
        rec_crc = crc32(mrec, sizeof(*mrec));

        if (mrec->leaf.data_len < 0 ||
            mrec->leaf.data_len > HAMMER_XBUFSIZE ||
            mrec->leaf.data_len + sizeof(*mrec) > mrec->head.rec_size) {
                return(EINVAL);
        }

        /*
         * Re-localize for target.  Relocalization of data is handled
         * by hammer_mirror_write().
         */
        mrec->leaf.base.localization &= HAMMER_LOCALIZE_MASK;
        mrec->leaf.base.localization += localization;

        /*
         * Delete records through until we reach (non-inclusively) the
         * target record.
         */
        cursor->key_end = mrec->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        error = hammer_mirror_delete_to(cursor, mirror);

        /*
         * Locate the record.
         *
         * If the record exists only the delete_tid may be updated.
         *
         * If the record does not exist we can create it only if the
         * create_tid is not too old.  If the create_tid is too old
         * it may have already been destroyed on the slave from pruning.
         *
         * Note that mirror operations are effectively as-of operations
         * and delete_tid can be 0 for mirroring purposes even if it is
         * not actually 0 at the originator.
         *
         * These functions can return EDEADLK
         */
        cursor->key_beg = mrec->leaf.base;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;
        error = hammer_btree_lookup(cursor);
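
        /*
         * Three outcomes are handled below: the record already exists and
         * only its delete_tid may be updated, the record is missing but
         * recent enough to be recreated, or the record is missing and
         * predates tid_beg (presumably pruned away) and is silently
         * skipped.
         */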
        if (error == 0 && hammer_mirror_check(cursor, mrec)) {
                error = hammer_mirror_update(cursor, mrec);
        } else if (error == ENOENT) {
                if (mrec->leaf.base.create_tid >= mirror->tid_beg)
                        error = hammer_mirror_write(cursor, mrec, uptr);
                else
                        error = 0;
        }
        if (error == 0 || error == EALREADY)
                mirror->key_cur = mrec->leaf.base;
        return(error);
}

/*
 * This works like write_rec but no write or update is necessary,
 * and no data payload is included so we couldn't do a write even
 * if we wanted to.
 *
 * We must still iterate for deletions, and we can validate the
 * record header which is a good way to test for corrupted mirror
 * targets XXX.
 *
 * mirror->key_cur must be carefully set when we succeed in processing
 * this mrec.
 */
static
int
hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
                             struct hammer_ioc_mrecord_rec *mrec,
                             struct hammer_ioc_mirror_rw *mirror,
                             u_int32_t localization)
{
        hammer_transaction_t trans;
        u_int32_t rec_crc;
        int error;

        trans = cursor->trans;
        rec_crc = crc32(mrec, sizeof(*mrec));

        /*
         * Re-localize for target.  Relocalization of data is handled
         * by hammer_mirror_write().
         */
        mrec->leaf.base.localization &= HAMMER_LOCALIZE_MASK;
        mrec->leaf.base.localization += localization;

        /*
         * Delete records through until we reach (non-inclusively) the
         * target record.
         */
        cursor->key_end = mrec->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        error = hammer_mirror_delete_to(cursor, mirror);

        /*
         * Locate the record and get past it by setting ATEDISK.  Perform
         * any necessary deletions.  We have no data payload and cannot
         * create a new record.
         */
        if (error == 0) {
                mirror->key_cur = mrec->leaf.base;
                cursor->key_beg = mrec->leaf.base;
                cursor->flags |= HAMMER_CURSOR_BACKEND;
                cursor->flags &= ~HAMMER_CURSOR_INSERT;
                error = hammer_btree_lookup(cursor);
                if (error == 0) {
                        if (hammer_mirror_check(cursor, mrec))
                                error = hammer_mirror_update(cursor, mrec);
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                } else {
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                }
                if (error == ENOENT)
                        error = 0;
        }
        return(error);
}

/*
 * As part of the mirror write we iterate across swaths of records
 * on the target which no longer exist on the source, and mark them
 * deleted.
 *
 * The caller has indexed the cursor and set up key_end.  We iterate
 * through to key_end.
 *
 * There is an edge case where the master has deleted a record whose
 * create_tid exactly matches our end_tid.  We cannot delete this
 * record on the slave yet because we cannot assign delete_tid == create_tid.
 * The deletion should be picked up on the next sequence since in order
 * to have been deleted on the master a transaction must have occurred with
 * a TID greater than the create_tid of the record.
 *
 * To support incremental re-mirroring, just for robustness, we do not
 * touch any records created beyond (or equal to) mirror->tid_end.
 */
static
int
hammer_mirror_delete_to(hammer_cursor_t cursor,
                        struct hammer_ioc_mirror_rw *mirror)
{
        hammer_btree_leaf_elm_t elm;
        int error;

        error = hammer_btree_iterate(cursor);
        while (error == 0) {
                elm = &cursor->node->ondisk->elms[cursor->index].leaf;
                KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
                cursor->flags |= HAMMER_CURSOR_ATEDISK;
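
                /*
                 * ATEDISK marks the current element as consumed so the
                 * iteration below advances past it whether or not it is
                 * deleted here.
                 */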
                /*
                 * Note: Must still delete records with create_tid < tid_beg,
                 *       as record may have been pruned-away on source.
                 */
                if (elm->base.delete_tid == 0 &&
                    elm->base.create_tid < mirror->tid_end) {
                        error = hammer_delete_at_cursor(cursor,
                                                        HAMMER_DELETE_ADJUST,
                                                        mirror->tid_end,
                                                        time_second,
                                                        1, NULL);
                }
                if (error == 0)
                        error = hammer_btree_iterate(cursor);
        }
        if (error == ENOENT)
                error = 0;
        return(error);
}

/*
 * Check whether an update is needed in the case where a match already
 * exists on the target.  The only type of update allowed in this case
 * is an update of the delete_tid.
 *
 * Return non-zero if the update should proceed.
 */
static
int
hammer_mirror_check(hammer_cursor_t cursor, struct hammer_ioc_mrecord_rec *mrec)
{
        hammer_btree_leaf_elm_t leaf = cursor->leaf;
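
        /*
         * Only a transition of delete_tid from zero to non-zero can be
         * applied; a mismatch where the incoming delete_tid is zero is
         * therefore not treated as an update.
         */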
        if (leaf->base.delete_tid != mrec->leaf.base.delete_tid) {
                if (mrec->leaf.base.delete_tid != 0)
                        return(1);
        }
        return(0);
}

/*
 * Update a record in-place.  Only the delete_tid can change, and
 * only from zero to non-zero.
 */
static
int
hammer_mirror_update(hammer_cursor_t cursor,
                     struct hammer_ioc_mrecord_rec *mrec)
{
        int error;

        /*
         * This case shouldn't occur.
         */
        if (mrec->leaf.base.delete_tid == 0)
                return(0);

        /*
         * Mark the record deleted on the mirror target.
         */
        error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
                                        mrec->leaf.base.delete_tid,
                                        mrec->leaf.delete_ts,
                                        1, NULL);
        cursor->flags |= HAMMER_CURSOR_ATEDISK;
        return(error);
}

/*
 * Write out a new record.
 */
static
int
hammer_mirror_write(hammer_cursor_t cursor,
                    struct hammer_ioc_mrecord_rec *mrec,
                    char *udata)
{
        hammer_transaction_t trans;
        hammer_buffer_t data_buffer;
        hammer_off_t ndata_offset;
        hammer_tid_t high_tid;
        void *ndata;
        int error;
        int doprop;

        trans = cursor->trans;
        data_buffer = NULL;

        /*
         * Get the sync lock so the whole mess is atomic
         */
        hammer_sync_lock_sh(trans);

        /*
         * Allocate and adjust data
         */
        if (mrec->leaf.data_len && mrec->leaf.data_offset) {
                ndata = hammer_alloc_data(trans, mrec->leaf.data_len,
                                          mrec->leaf.base.rec_type,
                                          &ndata_offset, &data_buffer,
                                          0, &error);
                if (ndata == NULL)
                        return(error);
                mrec->leaf.data_offset = ndata_offset;
                hammer_modify_buffer(trans, data_buffer, NULL, 0);
                error = copyin(udata, ndata, mrec->leaf.data_len);
                if (error == 0) {
                        if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
                                kprintf("data crc mismatch on pipe\n");
                                error = EINVAL;
                        } else {
                                error = hammer_mirror_localize_data(
                                                        ndata, &mrec->leaf);
                        }
                }
                hammer_modify_buffer_done(data_buffer);
        } else {
                mrec->leaf.data_offset = 0;
                error = 0;
                ndata = NULL;
        }
        if (error)
                goto failed;

        /*
         * Do the insertion.  This can fail with an EDEADLK or EALREADY.
         */
        cursor->flags |= HAMMER_CURSOR_INSERT;
        error = hammer_btree_lookup(cursor);
        if (error != ENOENT) {
                if (error == 0)
                        error = EALREADY;
                goto failed;
        }

        error = hammer_btree_insert(cursor, &mrec->leaf, &doprop);

        /*
         * Cursor is left on the current element, we want to skip it now.
         */
        cursor->flags |= HAMMER_CURSOR_ATEDISK;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        /*
         * Track a count of active inodes.
         */
        if (error == 0 &&
            mrec->leaf.base.rec_type == HAMMER_RECTYPE_INODE &&
            mrec->leaf.base.delete_tid == 0) {
                hammer_modify_volume_field(trans,
                                           trans->rootvol,
                                           vol0_stat_inodes);
                ++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
                hammer_modify_volume_done(trans->rootvol);
        }

        /*
         * vol0_next_tid must track the highest TID stored in the filesystem.
         * We do not need to generate undo for this update.
         */
        high_tid = mrec->leaf.base.create_tid;
        if (high_tid < mrec->leaf.base.delete_tid)
                high_tid = mrec->leaf.base.delete_tid;
        if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
                hammer_modify_volume(trans, trans->rootvol, NULL, 0);
                trans->rootvol->ondisk->vol0_next_tid = high_tid;
                hammer_modify_volume_done(trans->rootvol);
        }

        /*
         * WARNING!  cursor's leaf pointer may have changed after
         *           do_propagation returns.
         */
        if (error == 0 && doprop)
                hammer_btree_do_propagation(cursor, NULL, &mrec->leaf);

failed:
        /*
         * Cleanup
         */
        if (error && mrec->leaf.data_offset) {
                hammer_blockmap_free(cursor->trans,
                                     mrec->leaf.data_offset,
                                     mrec->leaf.data_len);
        }
        hammer_sync_unlock(trans);
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return(error);
}

/*
 * Localize the data payload.  Directory entries may need their
 * localization adjusted.
 */
static
int
hammer_mirror_localize_data(hammer_data_ondisk_t data,
                            hammer_btree_leaf_elm_t leaf)
{
        u_int32_t localization;

        if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
                localization = leaf->base.localization &
                               HAMMER_LOCALIZE_PSEUDOFS_MASK;
                if (data->entry.localization != localization) {
                        data->entry.localization = localization;
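                        /*
                         * The leaf's data CRC covers the directory entry,
                         * so regenerate it after rewriting the
                         * localization field.
                         */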
                        hammer_crc_set_leaf(data, leaf);
                }
        }
        return(0);
}