/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	60	/* stack recursion limit */

/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;
	int		depth;
	long		diddeferral;
	int		error;			/* cumulative error */
	int		flags;
#ifdef HAMMER2_SCAN_DEBUG
	long		scan_count;
	long		scan_mod_count;
	long		scan_upd_count;
	long		scan_onf_count;
	long		scan_del_count;
	long		scan_btype[7];
	long		flushq_count;
#endif
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 * Afterwards, normal transactions will be
			 * interlocked.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
#if 0
			/*
			 * (old) previous code interlocked against the main
			 *	 flush pass.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
#endif
		} else {
			/*
			 * Requesting a normal modifying transaction.
			 * Waits for any flush to finish before allowing.
			 * Multiple modifying transactions can run
			 * concurrently.
			 *
			 * If a flush is pending for more than one second
			 * but can't run because many modifying transactions
			 * are active, we wait for the flush to be granted.
			 *
			 * NOTE: Remember that non-modifying operations
			 *	 such as read, stat, readdir, etc, do
			 *	 not use transactions.
			 */
			if ((oflags & HAMMER2_TRANS_FPENDING) &&
			    (u_int)(ticks - pmp->trans.fticks) >= (u_int)hz) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((oflags & HAMMER2_TRANS_FPENDING) == 0 &&
			    (nflags & HAMMER2_TRANS_FPENDING)) {
				pmp->trans.fticks = ticks;
			}
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		} else {
			cpu_pause();
		}
		/* retry */
	}
}
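
/*
 * Illustrative sketch (not compiled): how a frontend modifying operation
 * is expected to bracket its work with the transaction API above.  The
 * function and its body are hypothetical; only hammer2_trans_init(),
 * hammer2_trans_sub(), and hammer2_trans_done() are real.
 */
#if 0
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	hammer2_trans_init(pmp, 0);	/* normal, interlocked vs flush */
	mtid = hammer2_trans_sub(pmp);	/* CLC id for this XOP sequence */
	/* ... run the XOP, typically stamping the inode with mtid ... */
	hammer2_trans_done(pmp);
}
#endif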

/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		} else {
			cpu_pause();
		}
		/* retry */
	}
}
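
/*
 * Note on the update idiom used above: hammer2_trans_init() and
 * hammer2_trans_done() both snapshot trans.flags into oflags, compute
 * the desired nflags, and attempt the transition with
 * atomic_cmpset_int(), retrying on collision.  The low
 * HAMMER2_TRANS_MASK bits of the flags word count active transactions,
 * which is why the transitions add and subtract 1.
 */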

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}

/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 *
 * NOTE: The flush code tests HAMMER2_CHAIN_DESTROY to differentiate
 *	 between these chains and the deep-recursion requeue.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *	HAMMER2_FLUSH_TOP	Indicates that this is the top of the flush.
 *				Is cleared for the recursion.
 *
 *	HAMMER2_FLUSH_ALL	Recurse everything
 *
 *	HAMMER2_FLUSH_INODE_RECURSE
 *				Recurse one inode level, flush includes
 *				sub-inodes but do not go deeper (thus UPDATE
 *				can wind up remaining set).
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
#ifdef HAMMER2_SCAN_DEBUG
			++info.flushq_count;
#endif
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hmp->flushq chains (marked DESTROY) must be
			 *	 handled unconditionally so they can be
			 *	 cleaned out.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			if (scan->error == 0) {
				if (scan->flags & HAMMER2_CHAIN_DESTROY) {
					hammer2_flush(scan,
						      flags |
						      HAMMER2_FLUSH_TOP |
						      HAMMER2_FLUSH_ALL);
				} else {
					hammer2_flush(scan,
						      flags &
						      ~HAMMER2_FLUSH_TOP);
				}
			} else {
				info.error |= scan->error;
			}
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from defer */
		}

		/*
		 * [re]flush chain as the deep recursion may have generated
		 * additional modifications.
		 */
		info.diddeferral = 0;
		if (info.parent != chain->parent) {
			if (hammer2_debug & 0x0040) {
				kprintf("LOST CHILD4 %p->%p "
					"(actual parent %p)\n",
					info.parent, chain, chain->parent);
			}
			hammer2_chain_drop(info.parent);
			info.parent = chain->parent;
			hammer2_chain_ref(info.parent);
		}
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
#ifdef HAMMER2_SCAN_DEBUG
	if (info.scan_count >= 10)
		kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
			"bt(%ld,%ld,%ld,%ld,%ld,%ld) flushq %ld\n",
			info.scan_count,
			info.scan_mod_count,
			info.scan_upd_count,
			info.scan_onf_count,
			info.scan_del_count,
			info.scan_btype[1],
			info.scan_btype[2],
			info.scan_btype[3],
			info.scan_btype[4],
			info.scan_btype[5],
			info.scan_btype[6],
			info.flushq_count);
#endif
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
	return (info.error);
}
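
/*
 * Illustrative sketch (not compiled): a minimal top-of-flush invocation
 * as performed by the backend flush XOP.  example_flush_pfs_root() is
 * hypothetical; the chain must already be locked and referenced, and the
 * flag combination mirrors hammer2_inode_xop_flush() below.
 */
#if 0
static int
example_flush_pfs_root(hammer2_chain_t *chain)
{
	/* top of the flush; stop at inode boundaries */
	return (hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_INODE_STOP));
}
#endif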

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int save_error;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;

	/*
	 * NOTE: parent can be NULL, usually due to destroy races.
	 */
	parent = info->parent;
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 *
	 * We must be careful on cold stops.  If CHAIN_UPDATE is set and
	 * we stop cold (versus a deferral, which will re-run the chain
	 * later), the update can wind up never being applied.  This
	 * situation most typically occurs on inode boundaries due to the
	 * way hammer2_vfs_sync() breaks-up the flush.  As a safety, we
	 * flush-through such situations.
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots that have been mounted.
		 *
		 * (If the PFS has not been mounted there may not be
		 *  anything monitoring its chains and it's up to us
		 *  to flush it).
		 *
		 * The typical sequence is to flush dirty PFS's starting at
		 * their root downward, then flush the device root (vchain).
		 * It is this second flush that typically leaves out the
		 * ALL flag.
		 *
		 * However we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: This test must be done before the depth-limit test,
		 *	 else it might become the top on a flushq iteration.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_INODE_STOP) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_INODE_STOP is specified and both ALL and TOP
		 * are clear, we must not flush the chain.  The chain should
		 * have already been flushed and any further ONFLUSH/UPDATE
		 * setting will be related to the next flush.
		 *
		 * This feature allows us to flush inodes independently of
		 * each other and meta-data above the inodes separately.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			if (parent)
				hammer2_chain_setflush(parent);
		}
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * Pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING! The recursion will unlock/relock info->parent
		 *	    (which is 'chain'), potentially allowing it
		 *	    to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;

		/*
		 * We may have to do this twice to catch any indirect
		 * block maintenance that occurs.  Other conditions which
		 * can keep setting ONFLUSH (such as deferrals) ought to
		 * be handled by the flushq code.  XXX needs more help
		 */
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
				NULL, hammer2_flush_recurse, info);
		}
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.  We continue
	 * to try to update the chain on lower-level errors, but the flush
	 * code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 * occurred?
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Both parent and chain must be locked in order to flush chain,
	 * in order to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_ref_hold(chain);
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(chain);

	/*
	 * Can't process if we can't access their content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		if (hammer2_debug & 0x0040) {
			kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
		}
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
			hammer2_chain_ref(chain);
			TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
			++info->diddeferral;
		}
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * These do not error per se since their data does
			 * not need to be re-read from media on lock.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);
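
			/*
			 * The section CRCs above must be computed before
			 * icrc_volheader, whose coverage range includes
			 * the icrc_sects[] array itself.
			 */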

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O
		 * that might not have yet occurred.  Remove the data range
		 * from dedup candidacy and attempt to invalidate the
		 * potentially dirty portion of the I/O buffer.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_dedup_delete(hmp,
						chain->bref.type,
						chain->bref.data_off,
						chain->bytes);
#if 0
			hammer2_io_t *dio;

			if (chain->dio) {
				hammer2_io_inval(chain->dio,
						 chain->bref.data_off,
						 chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  1)) != NULL) {
				hammer2_io_inval(dio,
						 chain->bref.data_off,
						 chain->bytes);
				hammer2_io_putblk(&dio);
			}
#endif
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 * This can fail if the hammer2_chain_modify() fails.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			goto skipupdate;
		}

		/*
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it messy and inefficient.
		 *
		 * The flusher is also responsible for collapsing the
		 * content of an indirect block into its parent whenever
		 * possible (with some hysteresis).  Not doing this will also
		 * not harm the topology, but would make it messy and
		 * inefficient.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
			if (hammer2_chain_indirect_maintenance(parent, chain))
				goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.  If this fails we re-set the UPDATE flag
		 * in the child.
		 *
		 * NOTE! A modification error can be ENOSPC.  We still want
		 *	 to flush modified chains recursively, not break out,
		 *	 so we just skip the update in this situation and
		 *	 continue.  That is, we still need to try to clean
		 *	 out dirty chains and buffers.
		 *
		 *	 This may not help bulkfree though. XXX
		 */
		save_error = hammer2_chain_modify(parent, 0, 0, 0);
		if (save_error) {
			info->error |= save_error;
			kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
				parent->bref.data_off, parent->bref.type,
				save_error);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			goto skipupdate;
		}
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    chain, &chain->bref);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
	}
skipupdate:
	if (parent)
		hammer2_chain_unlock(parent);

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * This function may set info->error as a side effect.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

#ifdef HAMMER2_SCAN_DEBUG
	++info->scan_count;
	if (child->flags & HAMMER2_CHAIN_MODIFIED)
		++info->scan_mod_count;
	if (child->flags & HAMMER2_CHAIN_UPDATE)
		++info->scan_upd_count;
	if (child->flags & HAMMER2_CHAIN_ONFLUSH)
		++info->scan_onf_count;
#endif

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  However, if it no longer
	 * winds up being the child of the parent we must skip this child.
	 *
	 * NOTE! chain locking errors are fatal.  They are never out-of-space
	 *	 errors.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_ref_hold(parent);
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
			parent, child, child->parent);
		goto done;
	}
	if (child->error) {
		kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= child->error;
		goto done;
	}

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
#ifdef HAMMER2_SCAN_DEBUG
	if (child->flags & HAMMER2_CHAIN_DESTROY)
		++info->scan_del_count;
#endif

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
#ifdef HAMMER2_SCAN_DEBUG
		if (child->bref.type < 7)
			++info->scan_btype[child->bref.type];
#endif
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

done:
	/*
	 * Relock to continue the loop.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(parent);
	if (parent->error) {
		kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= parent->error;
	}
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);	/* always continue the RB_SCAN */
}

/*
 * flush helper (backend threaded)
 *
 * Flushes chain topology for the specified inode.
 *
 * If HAMMER2_XOP_FLUSH is set we flush all chains from the current inode
 * through but stop at sub-inodes (we flush the inode chains for sub-inodes,
 * but do not go further as deeper modifications do not belong to the current
 * flush cycle).
 *
 * If HAMMER2_XOP_FLUSH is not set we flush the current inode's chains only
 * and do not recurse through sub-inodes, nor do we flush the inode chains
 * for those sub-inodes.
 *
 * Remember that HAMMER2 is currently using a flat inode model, so directory
 * hierarchies do not translate to inode hierarchies.  PFS ROOTs, however,
 * do.
 *
 * chain->parent can be NULL, usually due to destroy races.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int flush_error = 0;
	int fsync_error = 0;
	int total_error = 0;
	int j;
	int xflags;
	int ispfsroot = 0;

	xflags = HAMMER2_FLUSH_TOP;
	if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
		xflags |= HAMMER2_FLUSH_INODE_STOP;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, xflags);
			parent = chain->parent;
			if (parent)
				hammer2_chain_setflush(parent);
		}
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			ispfsroot = 1;
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Don't flush from the volume root to the PFSROOT unless ip was
	 * a PFSROOT.  If it isn't then this flush is probably related to
	 * a VOP_FSYNC.
	 */
	if (ispfsroot == 0)
		goto skip;

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = thr->clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 *
	 * vchain and fchain do not error on-lock since their data does
	 * not have to be re-read from media.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	fsync_error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);
	if (fsync_error || flush_error) {
		kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
			fsync_error, flush_error, hmp->devrepname);
	}

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (fsync_error == 0 && flush_error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;
		int vol_error = 0;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		fsync_error = biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j < 0)
			j = 0;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
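
		/*
		 * The backup volume headers rotate round-robin: each
		 * flush writes the next of up to HAMMER2_NUM_VOLHDRS
		 * copies, one per HAMMER2_ZONE_BYTES64 zone, wrapping
		 * early when the volume is too small to hold the full
		 * set.
		 */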
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, GETBLK_KVABIO, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bkvasync(bp);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		vol_error = bwrite(bp);
		hmp->volhdrno = j;
		if (vol_error)
			fsync_error = vol_error;
	}
	if (flush_error)
		total_error = flush_error;
	if (fsync_error)
		total_error = hammer2_errno_to_error(fsync_error);

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
skip:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
}