/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"
#define HAMMER2_FLUSH_DEPTH_LIMIT	10	/* stack recursion limit */
/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
        hammer2_chain_t *parent;
        int             depth;
        int             diddeferral;
        int             cache_index;
        int             flags;
        struct h2_flush_list flushq;
        hammer2_chain_t *debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;
static void hammer2_flush_core(hammer2_flush_info_t *info,
                        hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);
/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}
/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0               - Normal transaction, interlocked against flush
 *                   transaction.
 *
 * TRANS_ISFLUSH   - Flush transaction, interlocked against normal
 *                   transaction.
 *
 * TRANS_BUFCACHE  - Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;
        int dowait;

        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                dowait = 0;

                if (flags & HAMMER2_TRANS_ISFLUSH) {
                        /*
                         * Requesting flush transaction.  Wait for all
                         * currently running transactions to finish.
                         * Afterwards, normal transactions will be
                         * interlocked.
                         */
                        if (oflags & HAMMER2_TRANS_MASK) {
                                nflags = oflags | HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
                } else if (flags & HAMMER2_TRANS_BUFCACHE) {
                        /*
                         * Requesting strategy transaction from buffer-cache,
                         * or a VM getpages/putpages through the buffer cache.
                         * We must allow such transactions in all situations
                         * to avoid deadlocks.
                         */
                        nflags = (oflags | flags) + 1;
#if 0
                        /*
                         * (old) previous code interlocked against the main
                         *       flush pass.
                         */
                        if ((oflags & (HAMMER2_TRANS_ISFLUSH |
                                       HAMMER2_TRANS_PREFLUSH)) ==
                            HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
#endif
                } else {
                        /*
                         * Requesting normal modifying transaction (read-only
                         * operations do not use transactions).  Waits for
                         * any flush to finish before allowing.  Multiple
                         * modifying transactions can run concurrently.
                         */
                        if (oflags & HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
                }
                if (dowait)
                        tsleep_interlock(&pmp->trans.sync_wait, 0);
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if (dowait == 0)
                                break;
                        tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
                               "h2trans", hz);
                }
                /* retry */
        }
}
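
/*
 * Usage sketch (illustrative, not a verbatim excerpt from a caller): a
 * frontend modifying operation brackets its work with a transaction and
 * stamps each step with a fresh mtid via hammer2_trans_sub():
 *
 *      hammer2_trans_init(pmp, 0);
 *      mtid = hammer2_trans_sub(pmp);
 *      ...issue XOPs / modify chains using mtid...
 *      hammer2_trans_done(pmp);
 */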
/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
        hammer2_tid_t mtid;

        mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

        return (mtid);
}
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                KKASSERT(oflags & HAMMER2_TRANS_MASK);
                if ((oflags & HAMMER2_TRANS_MASK) == 1) {
                        /*
                         * This was the last transaction
                         */
                        nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
                                                  HAMMER2_TRANS_BUFCACHE |
                                                  HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING);
                } else {
                        /*
                         * Still transactions pending
                         */
                        nflags = oflags - 1;
                }
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
                            (oflags & HAMMER2_TRANS_WAITING)) {
                                wakeup(&pmp->trans.sync_wait);
                        }
                        break;
                }
                /* retry */
        }
}
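
/*
 * Illustration (an inference from the code above, not additional
 * mechanism): pmp->trans.flags packs the active-transaction count into
 * the low bits covered by HAMMER2_TRANS_MASK alongside the state flags,
 * which is why hammer2_trans_init() uses "(oflags | flags) + 1" and
 * hammer2_trans_done() uses "oflags - 1" inside atomic_cmpset_int()
 * retry loops.  Two concurrent fs_ops with a flush pending would look
 * something like:
 *
 *      trans.flags = HAMMER2_TRANS_FPENDING | HAMMER2_TRANS_WAITING | 2;
 *
 * The count reaching 0 is what triggers the wakeup() above.
 */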
/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
        hammer2_tid_t tid;

        tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

        return (tid);
}
/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
        KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
                 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}
/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
        if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
                hammer2_spin_ex(&chain->hmp->list_spin);
                if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
                                     HAMMER2_CHAIN_DEFERRED)) == 0) {
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
                                                      HAMMER2_CHAIN_DEFERRED);
                        TAILQ_INSERT_TAIL(&chain->hmp->flushq,
                                          chain, flush_node);
                        hammer2_chain_ref(chain);
                }
                hammer2_spin_unex(&chain->hmp->list_spin);
                hammer2_voldata_modify(chain->hmp);
        }
}
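
/*
 * Caller sketch (hypothetical call site, not from this file): a ripout
 * path would queue the disconnected chain instead of flushing it
 * synchronously, letting a later hammer2_flush() drain hmp->flushq in
 * bulk:
 *
 *      hammer2_chain_delete(parent, chain, mtid, 0);
 *      hammer2_delayed_flush(chain);
 */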
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.  The chain may be replaced by
 * the call if it was modified.
 */
void
hammer2_flush(hammer2_chain_t *chain, int flags)
{
        hammer2_chain_t *scan;
        hammer2_flush_info_t info;
        hammer2_dev_t *hmp;
        int loops;

        /*
         * Execute the recursive flush and handle deferrals.
         *
         * Chains can be ridiculously long (thousands deep), so to
         * avoid blowing out the kernel stack the recursive flush has a
         * depth limit.  Elements at the limit are placed on a list
         * for re-execution after the stack has been popped.
         */
        bzero(&info, sizeof(info));
        TAILQ_INIT(&info.flushq);
        info.cache_index = -1;
        info.flags = flags & ~HAMMER2_FLUSH_TOP;

        /*
         * Calculate parent (can be NULL), if not NULL the flush core
         * expects the parent to be referenced so it can easily lock/unlock
         * it without it getting ripped up.
         */
        if ((info.parent = chain->parent) != NULL)
                hammer2_chain_ref(info.parent);

        /*
         * Extra ref needed because flush_core expects it when replacing
         * chain.
         */
        hammer2_chain_ref(chain);
        hmp = chain->hmp;
        loops = 0;

        for (;;) {
                /*
                 * Move hmp->flushq to info.flushq if non-empty so it can
                 * be processed as part of this flush.
                 */
                if (TAILQ_FIRST(&hmp->flushq) != NULL) {
                        hammer2_spin_ex(&chain->hmp->list_spin);
                        TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
                        hammer2_spin_unex(&chain->hmp->list_spin);
                }

                /*
                 * Unwind deep recursions which had been deferred.  This
                 * can leave the FLUSH_* bits set for these chains, which
                 * will be handled when we [re]flush chain after the unwind.
                 */
                while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
                        KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
                        TAILQ_REMOVE(&info.flushq, scan, flush_node);
                        atomic_clear_int(&scan->flags,
                                         HAMMER2_CHAIN_DEFERRED |
                                         HAMMER2_CHAIN_DELAYED);

                        /*
                         * Now that we've popped back up we can do a secondary
                         * recursion on the deferred elements.
                         *
                         * NOTE: hammer2_flush() may replace scan.
                         */
                        if (hammer2_debug & 0x0040)
                                kprintf("deferred flush %p\n", scan);
                        hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
                        hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
                        hammer2_chain_unlock(scan);
                        hammer2_chain_drop(scan);  /* ref from deferral */
                }

                /*
                 * [re]flush chain.
                 */
                info.diddeferral = 0;
                hammer2_flush_core(&info, chain, flags);

                /*
                 * Only loop if deep recursions have been deferred.
                 */
                if (TAILQ_EMPTY(&info.flushq))
                        break;

                if (++loops % 1000 == 0) {
                        kprintf("hammer2_flush: excessive loops on %p\n",
                                chain);
                        if (hammer2_debug & 0x100000)
                                Debugger("hell4");
                }
        }
        hammer2_chain_drop(chain);
        if (info.parent)
                hammer2_chain_drop(info.parent);
}
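
/*
 * Caller sketch (mirrors hammer2_flush_quick() below): the caller must
 * hold both a ref and a lock on the chain across the call:
 *
 *      hammer2_chain_ref(chain);
 *      hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
 *      hammer2_flush(chain, HAMMER2_FLUSH_TOP);
 *      hammer2_chain_unlock(chain);
 *      hammer2_chain_drop(chain);
 */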
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
                   int flags)
{
        hammer2_chain_t *parent;
        hammer2_dev_t *hmp;
        int diddeferral;

        /*
         * (1) Optimize downward recursion to locate nodes needing action.
         *     Nothing to do if none of these flags are set.
         */
        if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
                if (hammer2_debug & 0x200) {
                        if (info->debug == NULL)
                                info->debug = chain;
                } else {
                        return;
                }
        }

        hmp = chain->hmp;
        diddeferral = info->diddeferral;
        parent = info->parent;          /* can be NULL */
        /*
         * Downward search recursion
         */
        if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
                /*
                 * Already deferred.
                 */
                ++info->diddeferral;
        } else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
                   (flags & HAMMER2_FLUSH_ALL) == 0 &&
                   (flags & HAMMER2_FLUSH_TOP) == 0) {
                /*
                 * We do not recurse through PFSROOTs.  PFSROOT flushes are
                 * handled by the related pmp's (whether mounted or not,
                 * including during recovery).
                 *
                 * But we must still process the PFSROOT chains for block
                 * table updates in their parent (which IS part of our flush).
                 *
                 * Note that the volume root, vchain, does not set this flag.
                 * Note the logic here requires that this test be done before
                 * the depth-limit test, else it might become the top on a
                 * flushq iteration.
                 */
                ;
        } else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
                /*
                 * Recursion depth reached.
                 */
                KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
                hammer2_chain_ref(chain);
                TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
                ++info->diddeferral;
        } else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
                                   HAMMER2_CHAIN_DESTROY)) {
                /*
                 * Downward recursion search (actual flush occurs bottom-up).
                 * pre-clear ONFLUSH.  It can get set again due to races,
                 * which we want so the scan finds us again in the next flush.
                 *
                 * We must also recurse if DESTROY is set so we can finally
                 * get rid of the related children, otherwise the node will
                 * just get re-flushed on lastdrop.
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
                info->parent = chain;
                hammer2_spin_ex(&chain->core.spin);
                RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
                        NULL, hammer2_flush_recurse, info);
                hammer2_spin_unex(&chain->core.spin);
                info->parent = parent;
                if (info->diddeferral)
                        hammer2_chain_setflush(chain);
        }
        /*
         * Now we are in the bottom-up part of the recursion.
         *
         * Do not update chain if lower layers were deferred.
         */
        if (info->diddeferral)
                goto done;

        /*
         * Propagate the DESTROY flag downwards.  This dummies up the flush
         * code and tries to invalidate related buffer cache buffers to
         * avoid the disk write.
         */
        if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
        /*
         * Chain was already modified or has become modified, flush it out.
         */
again:
        if ((hammer2_debug & 0x200) &&
            info->debug &&
            (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
                hammer2_chain_t *scan = chain;

                kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
                while (scan) {
                        kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
                                scan, scan->flags,
                                scan->bref.key, scan->bref.type);
                        if (scan == info->debug)
                                break;
                        scan = scan->parent;
                }
        }
        if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                /*
                 * Dispose of the modified bit.
                 *
                 * If parent is present, the UPDATE bit should already be set.
                 * UPDATE should already be set.
                 * bref.mirror_tid should already be set.
                 */
                KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
                         chain->parent == NULL);
                if (hammer2_debug & 0x800000) {
                        hammer2_chain_t *pp;

                        for (pp = chain; pp->parent; pp = pp->parent)
                                ;
                        kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) "
                                "TYPE %d FLAGS %08x (%s)\n",
                                chain, chain->parent, pp, pp->bref.type,
                                chain->bref.type, chain->flags,
                                (chain->bref.type == 1 ?
                                 (const char *)chain->data->ipdata.filename :
                                 "?"));
                }
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
                atomic_add_long(&hammer2_count_modified_chains, -1);

                /*
                 * Manage threads waiting for excessive dirty memory to
                 * be retired.
                 */
                hammer2_pfs_memory_wakeup(chain->pmp);

                if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
                    chain != &hmp->vchain &&
                    chain != &hmp->fchain) {
                        /*
                         * Set UPDATE bit indicating that the parent block
                         * table requires updating.
                         */
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
                }
                /*
                 * Issue the flush.  This is indirect via the DIO.
                 *
                 * NOTE: A DELETED node that reaches this point must be
                 *       flushed for synchronization point consistency.
                 *
                 * NOTE: Even though MODIFIED was already set, the related DIO
                 *       might not be dirty due to a system buffer cache
                 *       flush and must be set dirty if we are going to make
                 *       further modifications to the buffer.  Chains with
                 *       embedded data don't need this.
                 */
                if (hammer2_debug & 0x1000) {
                        kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
                                chain, chain->bref.type,
                                (uintmax_t)chain->bref.key,
                                chain->bref.keybits,
                                (uintmax_t)chain->bref.data_off);
                }
                if (hammer2_debug & 0x2000) {
                        Debugger("Flush hell");
                }

                /*
                 * Update chain CRCs for flush.
                 *
                 * NOTE: Volume headers are NOT flushed here as they require
                 *       special processing.
                 */
                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_FREEMAP:
                        /*
                         * Update the volume header's freemap_tid to the
                         * freemap's flushing mirror_tid.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
                        KKASSERT(chain == &hmp->fchain);
                        hmp->voldata.freemap_tid = chain->bref.mirror_tid;
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync freemap mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * The freemap can be flushed independently of the
                         * main topology, but for the case where it is
                         * flushed in the same transaction, and flushed
                         * before vchain (a case we want to allow for
                         * performance reasons), make sure modifications
                         * made during the flush under vchain use a new
                         * transaction id.
                         *
                         * Otherwise the mount recovery code will get confused.
                         */
                        ++hmp->voldata.mirror_tid;
                        break;
                case HAMMER2_BREF_TYPE_VOLUME:
                        /*
                         * The free block table is flushed by
                         * hammer2_vfs_sync() before it flushes vchain.
                         * We must still hold fchain locked while copying
                         * voldata to volsync, however.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        hammer2_chain_lock(&hmp->fchain,
                                           HAMMER2_RESOLVE_ALWAYS);
                        hammer2_voldata_lock(hmp);
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync volume mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * Update the volume header's mirror_tid to the
                         * main topology's flushing mirror_tid.  It is
                         * possible that voldata.mirror_tid is already
                         * beyond bref.mirror_tid due to the bump we made
                         * above in BREF_TYPE_FREEMAP.
                         */
                        if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
                                hmp->voldata.mirror_tid =
                                        chain->bref.mirror_tid;
                        }

                        /*
                         * The volume header is flushed manually by the
                         * syncer, not here.  All we do here is adjust the
                         * crc's.
                         */
                        KKASSERT(chain->data != NULL);
                        KKASSERT(chain->dio == NULL);

                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC1_OFF,
                                        HAMMER2_VOLUME_ICRC1_SIZE);
                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC0_OFF,
                                        HAMMER2_VOLUME_ICRC0_SIZE);
                        hmp->voldata.icrc_volheader =
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRCVH_OFF,
                                        HAMMER2_VOLUME_ICRCVH_SIZE);
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("syncvolhdr %016jx %016jx\n",
                                        hmp->voldata.mirror_tid,
                                        hmp->vchain.bref.mirror_tid);
                        }
                        hmp->volsync = hmp->voldata;
                        atomic_set_int(&chain->flags,
                                       HAMMER2_CHAIN_VOLUMESYNC);
                        hammer2_voldata_unlock(hmp);
                        hammer2_chain_unlock(&hmp->fchain);
                        break;
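                        /*
                         * Aside (a sketch under assumptions, not code from
                         * this driver): a mount-time consumer would verify
                         * the same ranges with hammer2_icrc32(), e.g.
                         *
                         *      if (voldata->icrc_volheader !=
                         *          hammer2_icrc32((char *)voldata +
                         *                         HAMMER2_VOLUME_ICRCVH_OFF,
                         *                         HAMMER2_VOLUME_ICRCVH_SIZE))
                         *              ...reject this volume header copy...
                         */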
                case HAMMER2_BREF_TYPE_DATA:
                        /*
                         * Data elements have already been flushed via the
                         * logical file buffer cache.  Their hash was set in
                         * the bref by the vop_write code.  Do not re-dirty.
                         *
                         * Make sure any device buffer(s) have been flushed
                         * out here (there aren't usually any to flush) XXX.
                         */
                        break;
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
                        /*
                         * Buffer I/O will be cleaned up when the volume is
                         * flushed (but the kernel is free to flush it before
                         * then, as well).
                         */
                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * NOTE: We must call io_setdirty() to make any late
                         *       changes to the inode data, the system might
                         *       have already flushed the buffer.
                         */
                        if (chain->data->ipdata.meta.op_flags &
                            HAMMER2_OPFLAG_PFSROOT) {
                                /*
                                 * non-NULL pmp if mounted as a PFS.  We must
                                 * sync fields cached in the pmp? XXX
                                 */
                                hammer2_inode_data_t *ipdata;

                                hammer2_io_setdirty(chain->dio);
                                ipdata = &chain->data->ipdata;
                                ipdata->meta.pfs_inum =
                                        chain->pmp->inode_tid;
                        } else {
                                /* can't be mounted as a PFS */
                        }

                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                default:
                        KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
                        panic("hammer2_flush_core: unsupported "
                              "embedded bref %d",
                              chain->bref.type);
                        /* NOT REACHED */
                }
                /*
                 * If the chain was destroyed try to avoid unnecessary I/O.
                 * The DIO system buffer may silently disallow the
                 * invalidation.
                 */
                if (chain->flags & HAMMER2_CHAIN_DESTROY) {
                        hammer2_io_t *dio;

                        if (chain->dio) {
                                hammer2_io_setinval(chain->dio,
                                                    chain->bref.data_off,
                                                    chain->bytes);
                        } else if ((dio = hammer2_io_getquick(hmp,
                                                  chain->bref.data_off,
                                                  chain->bytes)) != NULL) {
                                hammer2_io_setinval(dio,
                                                    chain->bref.data_off,
                                                    chain->bytes);
                                hammer2_io_putblk(&dio);
                        }
                }
        }
        /*
         * If UPDATE is set the parent block table may need to be updated.
         *
         * NOTE: UPDATE may be set on vchain or fchain in which case
         *       parent could be NULL.  It's easiest to allow the case
         *       and test for NULL.  parent can also wind up being NULL
         *       due to a deletion so we need to handle the case anyway.
         *
         * If no parent exists we can just clear the UPDATE bit.  If the
         * chain gets reattached later on the bit will simply get set
         * again.
         */
        if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
        /*
         * The chain may need its blockrefs updated in the parent.  This
         * requires some fancy footwork.
         */
        if (chain->flags & HAMMER2_CHAIN_UPDATE) {
                hammer2_blockref_t *base;
                int count;

                /*
                 * Both parent and chain must be locked.  This requires
                 * temporarily unlocking the chain.  We have to deal with
                 * the case where the chain might be reparented or modified
                 * while it was unlocked.
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
                hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
                if (chain->parent != parent) {
                        kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
                                chain, chain->parent, parent);
                        hammer2_chain_unlock(parent);
                        goto done;
                }

                /*
                 * Check race condition.  If someone got in and modified
                 * it again while it was unlocked, we have to loop up.
                 */
                if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                        hammer2_chain_unlock(parent);
                        kprintf("hammer2_flush: chain %p flush-mod race\n",
                                chain);
                        goto again;
                }
                /*
                 * Clear UPDATE flag, mark parent modified, update its
                 * modify_tid if necessary, and adjust the parent blockmap.
                 */
                if (chain->flags & HAMMER2_CHAIN_UPDATE)
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

                /*
                 * (optional code)
                 *
                 * Avoid actually modifying and updating the parent if it
                 * was flagged for destruction.  This can greatly reduce
                 * disk I/O in large tree removals because the
                 * hammer2_io_setinval() call in the upward recursion
                 * (see MODIFIED code above) can only handle a few cases.
                 */
                if (parent->flags & HAMMER2_CHAIN_DESTROY) {
                        if (parent->bref.modify_tid <
                            chain->bref.modify_tid) {
                                parent->bref.modify_tid =
                                        chain->bref.modify_tid;
                        }
                        atomic_clear_int(&chain->flags,
                                         HAMMER2_CHAIN_BMAPPED |
                                         HAMMER2_CHAIN_BMAPUPD);
                        hammer2_chain_unlock(parent);
                        goto skipupdate;
                }
                /*
                 * (semi-optional code)
                 *
                 * The flusher is responsible for deleting empty indirect
                 * blocks at this point.  If we don't do this, no major harm
                 * will be done but the empty indirect blocks will stay in
                 * the topology and make it a bit messy.
                 */
                if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
                    chain->core.live_count == 0 &&
                    (chain->flags & (HAMMER2_CHAIN_INITIAL |
                                     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
                        base = &chain->data->npdata[0];
                        count = chain->bytes / sizeof(hammer2_blockref_t);
                        hammer2_chain_countbrefs(chain, base, count);
                }
                if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
                    chain->core.live_count == 0) {
                        kprintf("DELETE CHAIN %016jx.%02x %016jx/%d refs=%d\n",
                                chain->bref.data_off, chain->bref.type,
                                chain->bref.key, chain->bref.keybits,
                                chain->refs);
                        hammer2_chain_delete(parent, chain,
                                             chain->bref.modify_tid,
                                             HAMMER2_DELETE_PERMANENT);
                        hammer2_chain_unlock(parent);
                        goto skipupdate;
                }
                /*
                 * We are updating the parent's blockmap, the parent must
                 * be set modified.
                 */
                hammer2_chain_modify(parent, 0, 0, 0);
                if (parent->bref.modify_tid < chain->bref.modify_tid)
                        parent->bref.modify_tid = chain->bref.modify_tid;
                /*
                 * Calculate blockmap pointer
                 */
                switch(parent->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * Access the inode's block array.  However, there is
                         * no block array if the inode is flagged DIRECTDATA.
                         */
                        if (parent->data &&
                            (parent->data->ipdata.meta.op_flags &
                             HAMMER2_OPFLAG_DIRECTDATA) == 0) {
                                base = &parent->data->
                                        ipdata.u.blockset.blockref[0];
                        } else {
                                base = NULL;
                        }
                        count = HAMMER2_SET_COUNT;
                        break;
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                        if (parent->data)
                                base = &parent->data->npdata[0];
                        else
                                base = NULL;
                        count = parent->bytes / sizeof(hammer2_blockref_t);
                        break;
                case HAMMER2_BREF_TYPE_VOLUME:
                        base = &chain->hmp->voldata.sroot_blockset.blockref[0];
                        count = HAMMER2_SET_COUNT;
                        break;
                case HAMMER2_BREF_TYPE_FREEMAP:
                        base = &parent->data->npdata[0];
                        count = HAMMER2_SET_COUNT;
                        break;
                default:
                        base = NULL;
                        count = 0;
                        panic("hammer2_flush_core: "
                              "unrecognized blockref type: %d",
                              parent->bref.type);
                }
                /*
                 * We synchronize pending statistics at this time.  Delta
                 * adjustments designated for the current and upper level
                 * are synchronized.
                 */
                if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
                        if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
                                hammer2_spin_ex(&parent->core.spin);
                                hammer2_base_delete(parent, base, count,
                                                    &info->cache_index,
                                                    chain);
                                hammer2_spin_unex(&parent->core.spin);
                                /* base_delete clears both bits */
                        } else {
                                atomic_clear_int(&chain->flags,
                                                 HAMMER2_CHAIN_BMAPUPD);
                        }
                }
                if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
                        hammer2_spin_ex(&parent->core.spin);
                        hammer2_base_insert(parent, base, count,
                                            &info->cache_index, chain);
                        hammer2_spin_unex(&parent->core.spin);
                        /* base_insert sets BMAPPED */
                }
                hammer2_chain_unlock(parent);
        }
skipupdate:
        ;
        /*
         * Final cleanup after flush
         */
done:
        KKASSERT(chain->refs > 0);
        if (hammer2_debug & 0x200) {
                if (info->debug == chain)
                        info->debug = NULL;
        }
}
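
/*
 * Flag lifecycle summary (restates the logic above, no new mechanism):
 *
 *      hammer2_chain_modify()  sets MODIFIED; setflush propagates ONFLUSH
 *                              upward toward the root.
 *      hammer2_flush_core()    top-down: follows ONFLUSH/DESTROY and
 *                              recurses via hammer2_flush_recurse().
 *                              bottom-up: clears MODIFIED, leaves UPDATE
 *                              set for the parent pass.
 *      UPDATE handling         base_delete()/base_insert() rewrite the
 *                              parent's block table, then UPDATE is
 *                              cleared.
 */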
/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *          bref.mirror_tid ourselves to indicate that the flush has
 *          processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
        hammer2_flush_info_t *info = data;
        hammer2_chain_t *parent = info->parent;

        /*
         * (child can never be fchain or vchain so a special check isn't
         *  needed).
         *
         * We must ref the child before unlocking the spinlock.
         *
         * The caller has added a ref to the parent so we can temporarily
         * unlock it in order to lock the child.
         */
        hammer2_chain_ref(child);
        hammer2_spin_unex(&parent->core.spin);

        hammer2_chain_unlock(parent);
        hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

        /*
         * Must propagate the DESTROY flag downwards, otherwise the
         * parent could end up never being removed because it will
         * be requeued to the flusher if it survives this run due to
         * the ONFLUSH flag.
         */
        if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
                atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);

        /*
         * Recurse and collect deferral data.  We're in the media flush,
         * this can cross PFS boundaries.
         */
        if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
                ++info->depth;
                hammer2_flush_core(info, child, info->flags);
                --info->depth;
        } else if (hammer2_debug & 0x200) {
                if (info->debug == NULL)
                        info->debug = child;
                ++info->depth;
                hammer2_flush_core(info, child, info->flags);
                --info->depth;
                if (info->debug == child)
                        info->debug = NULL;
        }

        /*
         * Relock to continue the loop
         */
        hammer2_chain_unlock(child);
        hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
        hammer2_chain_drop(child);
        KKASSERT(info->parent == parent);
        hammer2_spin_ex(&parent->core.spin);

        return (0);
}
/*
 * flush helper (direct)
 *
 * Quickly flushes any dirty chains for a device.  This will update our
 * concept of the volume root but does NOT flush the actual volume root
 * and does not flush dirty device buffers.
 *
 * This function is primarily used by the bulkfree code to allow it to
 * create a snapshot for the pass.  It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
        hammer2_chain_t *chain;

        hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

        hammer2_chain_ref(&hmp->vchain);
        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                chain = &hmp->vchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP |
                                     HAMMER2_FLUSH_ALL);
                KKASSERT(chain == &hmp->vchain);
        }
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->vchain);

        hammer2_trans_done(hmp->spmp);  /* spmp trans */
}
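
/*
 * Caller sketch (an assumption about the bulkfree pass, not a verbatim
 * excerpt): bulkfree would quiesce its snapshot of the topology with
 *
 *      hammer2_flush_quick(hmp);
 *      ...scan the now-consistent topology under hmp->vchain...
 *
 * accepting that dirty logical buffers without allocated blocks are
 * intentionally left out, per the comment above.
 */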
/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_flush_t *xop = &arg->xop_flush;
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;
        hammer2_dev_t *hmp;
        struct buf *bp;
        int j;
        int error;
        int total_error = 0;

        /*
         * Flush core chains.
         */
        chain = hammer2_inode_chain(xop->head.ip1, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        if (chain) {
                hmp = chain->hmp;
                if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
                    TAILQ_FIRST(&hmp->flushq) != NULL) {
                        hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                        parent = chain->parent;
                        KKASSERT(chain->pmp != parent->pmp);
                        hammer2_chain_setflush(parent);
                }
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
        } else {
                hmp = NULL;
        }
        /*
         * Flush volume roots.  Avoid replication, we only want to
         * flush each hammer2_dev (hmp) once.
         */
        for (j = clindex - 1; j >= 0; --j) {
                if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
                        if (chain->hmp == hmp) {
                                chain = NULL;   /* safety */
                                goto skip;
                        }
                }
        }
        chain = NULL;   /* safety */
        /*
         * spmp transaction.  The super-root is never directly mounted so
         * there shouldn't be any vnodes, let alone any dirty vnodes
         * associated with it, so we shouldn't have to mess around with any
         * vnode flushes here.
         */
        hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

        /*
         * Media mounts have two 'roots', vchain for the topology
         * and fchain for the free block table.  Flush both.
         *
         * Note that the topology and free block table are handled
         * independently, so the free block table can wind up being
         * ahead of the topology.  We depend on the bulk free scan
         * code to deal with any loose ends.
         */
        hammer2_chain_ref(&hmp->vchain);
        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_ref(&hmp->fchain);
        hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                /*
                 * This will also modify vchain as a side effect,
                 * mark vchain as modified now.
                 */
                hammer2_voldata_modify(hmp);
                chain = &hmp->fchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                KKASSERT(chain == &hmp->fchain);
        }
        hammer2_chain_unlock(&hmp->fchain);
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->fchain);
        /* vchain dropped down below */

        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                chain = &hmp->vchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                KKASSERT(chain == &hmp->vchain);
        }
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->vchain);
        /*
         * We can't safely flush the volume header until we have
         * flushed any device buffers which have built up.
         *
         * XXX this isn't being incremental.
         */
        vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
        vn_unlock(hmp->devvp);
        /*
         * The flush code sets CHAIN_VOLUMESYNC to indicate that the
         * volume header needs synchronization via hmp->volsync.
         *
         * XXX synchronize the flag & data with only this flush XXX
         */
        if (error == 0 &&
            (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
                /*
                 * Synchronize the disk before flushing the volume
                 * header.
                 */
                bp = getpbuf(NULL);
                bp->b_bio1.bio_offset = 0;
                bp->b_bufsize = 0;
                bp->b_bcount = 0;
                bp->b_cmd = BUF_CMD_FLUSH;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;
                vn_strategy(hmp->devvp, &bp->b_bio1);
                biowait(&bp->b_bio1, "h2vol");
                relpbuf(bp, NULL);

                /*
                 * Then we can safely flush the version of the
                 * volume header synchronized by the flush code.
                 */
                j = hmp->volhdrno + 1;
                if (j >= HAMMER2_NUM_VOLHDRS)
                        j = 0;
                if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
                    hmp->volsync.volu_size) {
                        j = 0;
                }
                if (hammer2_debug & 0x8000) {
                        /* debug only, avoid syslogd loop */
                        kprintf("sync volhdr %d %jd\n",
                                j, (intmax_t)hmp->volsync.volu_size);
                }
                bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
                            HAMMER2_PBUFSIZE, 0, 0);
                atomic_clear_int(&hmp->vchain.flags,
                                 HAMMER2_CHAIN_VOLUMESYNC);
                bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
                bawrite(bp);
                hmp->volhdrno = j;
        }
        if (error)
                total_error = error;
        hammer2_trans_done(hmp->spmp);  /* spmp trans */
skip:
        error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
}