/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */
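/*
 * (added illustration, not from the original source) Roughly, a flush
 * walks down from the flush point following chains flagged ONFLUSH,
 * then performs the actual updates on the way back up:
 *
 *	vchain				1. top-down: follow ONFLUSH to
 *	  '-> indirect block		   find MODIFIED/UPDATE chains
 *	        '-> inode		2. bottom-up: children update the
 *	              '-> data		   parent's block table, marking
 *					   the parent MODIFIED in turn
 */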
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"

#define HAMMER2_FLUSH_DEPTH_LIMIT	10	/* stack recursion limit */
/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t	*parent;
	int		depth;
	int		diddeferral;
	int		cache_index;
	int		flags;
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);
/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}
/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 * Afterwards, normal transactions will be
			 * interlocked.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
#if 0
			/*
			 * (old) previous code interlocked against the main
			 *	 flush.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
#endif
		} else {
			/*
			 * Requesting normal modifying transaction (read-only
			 * operations do not use transactions).  Waits for
			 * any flush to finish before allowing.  Multiple
			 * modifying transactions can run concurrently.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		}
		/* retry */
	}
}
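#if 0
/*
 * Added sketch, not part of the original file: how a caller would
 * typically bracket a modifying operation with the transaction API
 * above.  The function name and the elided XOP body are hypothetical.
 */
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	hammer2_trans_init(pmp, 0);	/* normal fs_op, may wait on flush */
	mtid = hammer2_trans_sub(pmp);	/* cluster-level modify_tid */
	/* ... run XOP(s), stamping modifications with mtid ... */
	hammer2_trans_done(pmp);	/* may wake a pending flush */
}
#endif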
/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}
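/*
 * (added note) Because the mtid comes from an atomic fetchadd, XOPs run
 * in sequence within one transaction receive strictly increasing ids
 * (e.g. N, then N+1); recording the latest mtid in the inode is what
 * lets the per-node backends order the changes consistently.
 */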
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		}
		/* retry */
	}
}
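/*
 * (added note) The lock-free updates in hammer2_trans_init() and
 * hammer2_trans_done() work because the low bits of pmp->trans.flags
 * (HAMMER2_TRANS_MASK) serve as the active-transaction count while the
 * high bits are state flags: (oflags | flags) + 1 increments the count
 * on entry and (oflags - 1) decrements it here, so a masked result of
 * zero means the last transaction just completed and any waiters can
 * be woken.
 */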
/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return (tid);
}
/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}
/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}
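/*
 * (added note) Chains queued above are consumed by hammer2_flush()
 * below, which concatenates hmp->flushq onto its private info.flushq
 * and runs each entry as a deferred flush before [re]flushing its own
 * chain.
 */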
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.  The chain may be replaced by
 * the call if it was modified.
 */
void
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.cache_index = -1;
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
			atomic_clear_int(&scan->flags,
					 HAMMER2_CHAIN_DEFERRED |
					 HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);  /* ref from deferral */
		}

		/*
		 * [re]flush chain.
		 */
		info.diddeferral = 0;
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
}
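/*
 * (added example) With HAMMER2_FLUSH_DEPTH_LIMIT at 10, flushing a
 * dirty subtree 25 levels deep proceeds in passes: the first recursion
 * defers the chains it reaches at the depth limit onto info.flushq, the
 * deferral loop above then restarts each of those chains from depth 0,
 * and the top-level chain is re-flushed until no deferrals remain.
 */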
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 *			WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int diddeferral;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;
	diddeferral = info->diddeferral;
	parent = info->parent;		/* can be NULL */

	/*
	 * Downward search recursion
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0) {
		/*
		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
		 * handled by the related pmp's (whether mounted or not,
		 * including during recovery).
		 *
		 * But we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * Note that the volume root, vchain, does not set this flag.
		 * Note the logic here requires that this test be done before
		 * the depth-limit test, else it might become the top on a
		 * flushq iteration.
		 */
		;
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races,
		 * which we want so the scan finds us again in the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		info->parent = chain;
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Chain was already modified or has become modified, flush it out.
	 */
again:
	if ((hammer2_debug & 0x200) &&
	    info->debug &&
	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
		hammer2_chain_t *scan = chain;

		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
		while (scan) {
			kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
				scan, scan->flags,
				scan->bref.key, scan->bref.type);
			if (scan == info->debug)
				break;
			scan = scan->parent;
		}
	}
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * Dispose of the modified bit.
		 *
		 * If parent is present, the UPDATE bit should already be
		 * set.  bref.mirror_tid should already be set.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		if (hammer2_debug & 0x800000) {
			hammer2_chain_t *pp;

			for (pp = chain; pp->parent; pp = pp->parent)
				;
			kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) "
				"TYPE %d FLAGS %08x (%s)\n",
				chain, chain->parent, pp, pp->bref.type,
				chain->bref.type, chain->flags,
				(chain->bref.type == 1 ?
				 (const char *)chain->data->ipdata.filename :
				 "?"));
		}
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		hammer2_pfs_memory_wakeup(chain->pmp);

		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_DIRENT:
			/*
			 * A directory entry can use the check area to store
			 * the filename for filenames <= 64 bytes, don't blow
			 * it up!
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			if (chain->bytes)
				hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O.
		 * The DIO system buffer may silently disallow the
		 * invalidation.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_t *dio;

			if (chain->dio) {
				hammer2_io_setinval(chain->dio,
						    chain->bref.data_off,
						    chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes)) != NULL) {
				hammer2_io_setinval(dio,
						    chain->bref.data_off,
						    chain->bytes);
				hammer2_io_putblk(&dio);
			}
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.  This
	 * requires some fancy footwork.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Both parent and chain must be locked.  This requires
		 * temporarily unlocking the chain.  We have to deal with
		 * the case where the chain might be reparented or modified
		 * while it was unlocked.
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
		if (chain->parent != parent) {
			kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
				chain, chain->parent, parent);
			hammer2_chain_unlock(parent);
			goto done;
		}

		/*
		 * Check race condition.  If someone got in and modified
		 * it again while it was unlocked, we have to loop up.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			hammer2_chain_unlock(parent);
			kprintf("hammer2_flush: chain %p flush-mod race\n",
				chain);
			goto again;
		}

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		if (chain->flags & HAMMER2_CHAIN_UPDATE)
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_BMAPPED |
					 HAMMER2_CHAIN_BMAPUPD);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * (semi-optional code)
		 *
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it a bit messy.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0 &&
		    (chain->flags & (HAMMER2_CHAIN_INITIAL |
				     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
			base = &chain->data->npdata[0];
			count = chain->bytes / sizeof(hammer2_blockref_t);
			hammer2_chain_countbrefs(chain, base, count);
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0) {
			kprintf("DELETE CHAIN %016jx.%02x %016jx/%d refs=%d\n",
				chain->bref.data_off, chain->bref.type,
				chain->bref.key, chain->bref.keybits,
				chain->refs);
			hammer2_chain_delete(parent, chain,
					     chain->bref.modify_tid,
					     HAMMER2_DELETE_PERMANENT);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be modified.
		 */
		hammer2_chain_modify(parent, 0, 0, 0);
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count,
						    &info->cache_index, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    &info->cache_index, chain);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
		hammer2_chain_unlock(parent);
	}
skipupdate:
	;

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}
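/*
 * (added note) Control-flow summary for hammer2_flush_core(): "again"
 * re-runs the MODIFIED/UPDATE processing after the flush-mod race is
 * detected, "skipupdate" bypasses the parent block-table update when
 * the parent is being destroyed or the empty chain was deleted, and
 * "done" is the common exit which leaves the caller's lock and extra
 * ref on the chain intact.
 */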
/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed).
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

	/*
	 * Relock to continue the loop
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}
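/*
 * (added note) The ref/unlock/lock sequence above is the usual pattern
 * for descending while a spinlock is held: the child is referenced
 * while parent->core.spin is still held so it cannot be freed, the
 * spinlock and the parent's lock are then released before locking the
 * child (avoiding lock-order deadlocks), and the parent is relocked
 * and re-spinlocked before returning control to RB_SCAN.
 */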
/*
 * flush helper (direct)
 *
 * Quickly flushes any dirty chains for a device.  This will update our
 * concept of the volume root but does NOT flush the actual volume root
 * and does not flush dirty device buffers.
 *
 * This function is primarily used by the bulkfree code to allow it to
 * create a snapshot for the pass.  It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
	hammer2_chain_t *chain;

	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_ALL);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_trans_done(hmp->spmp);  /* spmp trans */
}
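/*
 * (added note) Per the comment above, the typical caller is the
 * bulkfree pass, which invokes hammer2_flush_quick(hmp) to obtain a
 * self-consistent topology under vchain before scanning, without
 * waiting on dirty logical buffers or writing the volume header.
 */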
/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int error = 0;
	int total_error = 0;
	int j;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
			parent = chain->parent;
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = thr->clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental.
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, 0, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		bawrite(bp);
		hmp->volhdrno = j;
	}
	if (error)
		total_error = error;

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
skip:
	error = hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
}