hammer2 - Implement error processing and free reserve enforcement
[dragonfly.git] / sys / vfs / hammer2 / hammer2_flush.c
/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	10	/* stack recursion limit */

/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	int		diddeferral;
	int		error;			/* cumulative error */
	int		flags;
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 * Afterwards, normal transactions will be
			 * interlocked.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
#if 0
			/*
			 * (old) previous code interlocked against the main
			 *	 flush pass.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
#endif
		} else {
			/*
			 * Requesting normal modifying transaction (read-only
			 * operations do not use transactions).  Waits for
			 * any flush to finish before allowing.  Multiple
			 * modifying transactions can run concurrently.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

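/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * driver code): a modifying frontend operation brackets its work with
 * hammer2_trans_init()/hammer2_trans_done() and pulls a fresh mtid via
 * hammer2_trans_sub() for each XOP it issues in sequence.  The XOP
 * dispatch steps below are hypothetical placeholders.
 */
#if 0
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	hammer2_trans_init(pmp, 0);	/* normal fs_op, interlocked vs flush */

	mtid = hammer2_trans_sub(pmp);	/* CLC id for the first XOP */
	/* ... issue first XOP using mtid ... */

	mtid = hammer2_trans_sub(pmp);	/* new CLC id for the next XOP */
	/* ... issue second XOP using mtid ... */

	hammer2_trans_done(pmp);
}
#endif
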
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}

/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

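/*
 * Note: the delayed queue accumulated here is drained by hammer2_flush()
 * below, which moves hmp->flushq onto its local info.flushq at the top of
 * its deferral loop:
 *
 *	TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
 */
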
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			if (scan->error == 0) {
				hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
				hammer2_chain_unlock(scan);
				hammer2_chain_drop(scan);	/* ref from defer */
			} else {
				info.error |= scan->error;
			}
		}

		/*
		 * [re]flush chain.
		 */
		info.diddeferral = 0;
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
	return (info.error);
}

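/*
 * Usage sketch (mirrors hammer2_inode_xop_flush() at the bottom of this
 * file): the chain must be locked and referenced by the caller, and the
 * returned HAMMER2 error code should be accumulated; the kprintf below
 * is purely illustrative.
 *
 *	error = hammer2_flush(chain, HAMMER2_FLUSH_TOP);
 *	if (error)
 *		kprintf("hammer2: flush error %08x\n", error);
 */
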
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int diddeferral;
	int save_error;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;
	diddeferral = info->diddeferral;
	parent = info->parent;		/* can be NULL */
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots.  The typical sequence is
		 * to flush dirty PFS's starting at their root downward,
		 * then flush the device root (vchain).  It is this second
		 * flush that typically leaves out the ALL flag.
		 *
		 * However we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: This test must be done before the depth-limit test,
		 *	 else it might become the top on a flushq iteration.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING! The recursion will unlock/relock info->parent
		 *	    (which is 'chain'), potentially allowing it
		 *	    to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.  We continue
	 * to try to update the chain on lower-level errors, but the flush
	 * code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 *     occurred?
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Both parent and chain must be locked in order to flush chain,
	 * in order to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);

	/*
	 * Can't process if we can't access their content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
			parent, chain, chain->parent);
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
			hammer2_chain_ref(chain);
			TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
			++info->diddeferral;
		}
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Chain was already modified or has become modified, flush it out.
	 */
	if ((hammer2_debug & 0x200) &&
	    info->debug &&
	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
		hammer2_chain_t *scan = chain;

		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
		while (scan) {
			kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
				scan, scan->flags,
				scan->bref.key, scan->bref.type);
			if (scan == info->debug)
				break;
			scan = scan->parent;
		}
	}

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		if (hammer2_debug & 0x800000) {
			hammer2_chain_t *pp;

			for (pp = chain; pp->parent; pp = pp->parent)
				;
			kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d FLAGS %08x (%s)\n",
				chain, chain->parent, pp, pp->bref.type,
				chain->bref.type, chain->flags,
				(chain->bref.type == 1 ? (const char *)chain->data->ipdata.filename : "?")
			);
			print_backtrace(10);
		}
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * These do not error per se since their data does
			 * not need to be re-read from media on lock.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume  mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O
		 * that might not have yet occurred.  Remove the data range
		 * from dedup candidacy and attempt to invalidate the
		 * potentially dirty portion of the I/O buffer.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_dedup_delete(hmp,
						chain->bref.type,
						chain->bref.data_off,
						chain->bytes);
#if 0
			hammer2_io_t *dio;
			if (chain->dio) {
				hammer2_io_inval(chain->dio,
						 chain->bref.data_off,
						 chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  1)) != NULL) {
				hammer2_io_inval(dio,
						 chain->bref.data_off,
						 chain->bytes);
				hammer2_io_putblk(&dio);
			}
#endif
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 * This can fail if the hammer2_chain_modify() fails.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			goto skipupdate;
		}

		/*
		 * (semi-optional code)
		 *
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it a bit messy.
		 *
		 * Do not delete internal freemap nodes.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0 &&
		    (chain->flags & (HAMMER2_CHAIN_INITIAL |
				     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
			base = &chain->data->npdata[0];
			count = chain->bytes / sizeof(hammer2_blockref_t);
			hammer2_chain_countbrefs(chain, base, count);
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0 &&
		    RB_EMPTY(&chain->core.rbtree)) {
#if 0
			kprintf("DELETE CHAIN %016jx.%02x %016jx/%d refs=%d\n",
				chain->bref.data_off, chain->bref.type,
				chain->bref.key, chain->bref.keybits,
				chain->refs);
#endif
			hammer2_chain_delete(parent, chain,
					     chain->bref.modify_tid,
					     HAMMER2_DELETE_PERMANENT);
			goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.  If this fails we re-set the UPDATE flag
		 * in the child.
		 *
		 * NOTE! A modification error can be ENOSPC.  We still want
		 *	 to flush modified chains recursively, not break out,
		 *	 so we just skip the update in this situation and
		 *	 continue.  That is, we still need to try to clean
		 *	 out dirty chains and buffers.
		 *
		 *	 This may not help bulkfree though. XXX
		 */
		save_error = hammer2_chain_modify(parent, 0, 0, 0);
		if (save_error) {
			info->error |= save_error;
			kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
				parent->bref.data_off, parent->bref.type,
				save_error);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			goto skipupdate;
		}
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count, chain);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
	}
skipupdate:
	if (parent)
		hammer2_chain_unlock(parent);

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * This function may set info->error as a side effect.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 * needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  However, if it no longer
	 * winds up being the child of the parent we must skip this child.
	 *
	 * NOTE! chain locking errors are fatal.  They are never out-of-space
	 *	 errors.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
			parent, child, child->parent);
		goto done;
	}
	if (child->error) {
		kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= child->error;
		goto done;
	}

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

done:
	/*
	 * Relock to continue the loop.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	if (parent->error) {
		kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= parent->error;
	}
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

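/*
 * Usage note: this callback is driven by the RB_SCAN() invocation in
 * hammer2_flush_core() above:
 *
 *	RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
 *		NULL, hammer2_flush_recurse, info);
 *
 * Returning 0, as this function always does, lets RB_SCAN continue with
 * the remaining children (per the DragonFly RB_SCAN convention, a
 * negative return value would abort the scan).
 */
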
/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int flush_error = 0;
	int fsync_error = 0;
	int total_error = 0;
	int j;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
			parent = chain->parent;
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = thr->clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 *
	 * vchain and fchain do not error on-lock since their data does
	 * not have to be re-read from media.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental.
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	fsync_error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);
	if (fsync_error || flush_error) {
		kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
			fsync_error, flush_error, hmp->devrepname);
	}

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (fsync_error == 0 && flush_error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;
		int vol_error = 0;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		fsync_error = biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, 0, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		vol_error = bwrite(bp);
		hmp->volhdrno = j;
		if (vol_error)
			fsync_error = vol_error;
	}
	if (flush_error)
		total_error = flush_error;
	if (fsync_error)
		total_error = hammer2_errno_to_error(fsync_error);

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
skip:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
}
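
/*
 * Note on the volume header rotation above: HAMMER2 maintains
 * HAMMER2_NUM_VOLHDRS copies of the volume header, one per
 * HAMMER2_ZONE_BYTES64 zone at the beginning of the device.  Each flush
 * writes the next copy in round-robin order, wrapping back to copy 0
 * when the index overflows or when the device is too small to hold the
 * next zone.  Mount-time recovery can then choose the volume header with
 * the highest valid mirror_tid.
 */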