/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"
/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_mount_t	*hmp;
	hammer2_chain_t	*parent;
	hammer2_trans_t	*trans;
	int		depth;
	int		diddeferral;
	struct flush_deferral_list flush_list;
	hammer2_tid_t	sync_tid;	/* flush synchronization point */
	hammer2_tid_t	mirror_tid;	/* collect mirror TID updates */
};

typedef struct hammer2_flush_info hammer2_flush_info_t;
static void hammer2_chain_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain);
static int hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data);
static int hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data);

#if 0
static __inline
void
hammer2_updatestats(hammer2_flush_info_t *info, hammer2_blockref_t *bref,
		    int how)
{
	hammer2_key_t bytes;

	if (bref->type != 0) {
		bytes = 1 << (bref->data_off & HAMMER2_OFF_MASK_RADIX);
		if (bref->type == HAMMER2_BREF_TYPE_INODE)
			info->inode_count += how;
		if (how < 0)
			info->data_count -= bytes;
		else
			info->data_count += bytes;
	}
}
#endif
/*
 * Transaction support functions for writing to the filesystem.
 *
 * Initializing a new transaction allocates a transaction ID.  We
 * don't bother marking the volume header MODIFIED.  Instead, the volume
 * will be synchronized at a later time as part of a larger flush sequence.
 *
 * Non-flush transactions can typically run concurrently.  However, if
 * there are non-flush transactions both before AND after a flush trans,
 * the transactions after stall until the ones before finish.
 *
 * Non-flush transactions occurring after a flush trans can run concurrently
 * with that flush.  They only have to wait for transactions prior to the
 * flush trans to complete before they unstall.
 *
 * WARNING! Modifications to the root volume cannot dup the root volume
 *	    header to handle synchronization points, so alloc_tid can
 *	    wind up (harmlessly) more advanced on flush.
 *
 * WARNING! Operations which might call inode_duplicate()/chain_duplicate()
 *	    depend heavily on having a unique sync_tid to avoid duplication
 *	    collisions (which key off of delete_tid).
 */
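/*
 * Worked example (editorial, derived from the rules above): with
 * transq = [T10, F12, T14] where F12 is a flush, T10 runs to completion
 * first; F12 waits at hammer2_trans_init() until it reaches the head of
 * the queue; T14, initialized after F12, blocks until F12 becomes the
 * running flush and then runs concurrently with it.
 */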
void
hammer2_trans_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		   hammer2_inode_t *ip, int flags)
{
	hammer2_trans_t *scan;

	bzero(trans, sizeof(*trans));
	trans->hmp = hmp;

	hammer2_voldata_lock(hmp);
	trans->sync_tid = hmp->voldata.alloc_tid++;
	trans->flags = flags;
	trans->td = curthread;
	trans->tmp_ip = ip;
	trans->tmp_bpref = 0;
	TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);

	if (flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we are a flush we have to wait for all transactions
		 * prior to our flush synchronization point to complete
		 * before we can start our flush.
		 */
		++hmp->flushcnt;
		if (hmp->curflush == NULL) {
			hmp->curflush = trans;
			hmp->topo_flush_tid = trans->sync_tid;
		}
		while (TAILQ_FIRST(&hmp->transq) != trans) {
			lksleep(&trans->sync_tid, &hmp->voldatalk,
				0, "h2syncw", hz);
		}

		/*
		 * Once we become the running flush we can wakeup anyone
		 * who blocked on us.
		 */
		scan = trans;
		while ((scan = TAILQ_NEXT(scan, entry)) != NULL) {
			if (scan->flags & HAMMER2_TRANS_ISFLUSH)
				break;
			if (scan->blocked == 0)
				break;
			scan->blocked = 0;
			wakeup(&scan->blocked);
		}
	} else {
		/*
		 * If we are not a flush but our sync_tid is after a
		 * stalled flush, we have to wait until that flush unstalls
		 * (that is, all transactions prior to that flush complete),
		 * but then we can run concurrently with that flush.
		 *
		 * (flushcnt check only good as pre-condition, otherwise it
		 *  may represent elements queued after us after we block).
		 */
		if (hmp->flushcnt > 1 ||
		    (hmp->curflush &&
		     TAILQ_FIRST(&hmp->transq) != hmp->curflush)) {
			trans->blocked = 1;
			while (trans->blocked) {
				lksleep(&trans->blocked, &hmp->voldatalk,
					0, "h2trans", hz);
			}
		}
	}
	hammer2_voldata_unlock(hmp, 0);
}
void
hammer2_trans_done(hammer2_trans_t *trans)
{
	hammer2_mount_t *hmp = trans->hmp;
	hammer2_trans_t *scan;

	hammer2_voldata_lock(hmp);
	TAILQ_REMOVE(&hmp->transq, trans, entry);
	if (trans->flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we were a flush we have to adjust curflush to the
		 * next flush.
		 *
		 * flush_tid is used to partition copy-on-write operations
		 * (mostly duplicate-on-modify ops), which is what allows
		 * us to execute a flush concurrent with modifying operations
		 * with higher TIDs.
		 */
		--hmp->flushcnt;
		if (hmp->flushcnt) {
			TAILQ_FOREACH(scan, &hmp->transq, entry) {
				if (scan->flags & HAMMER2_TRANS_ISFLUSH)
					break;
			}
			KKASSERT(scan);
			hmp->curflush = scan;
			hmp->topo_flush_tid = scan->sync_tid;
		} else {
			/*
			 * Theoretically we don't have to clear flush_tid
			 * here since the flush will have synchronized
			 * all operations <= flush_tid already.  But for
			 * now we zero it.
			 */
			hmp->curflush = NULL;
			hmp->topo_flush_tid = 0;
		}
	} else {
		/*
		 * If we are not a flush but a flush is now at the head
		 * of the queue and we were previously blocking it,
		 * we can now unblock it.
		 */
		if (hmp->flushcnt &&
		    (scan = TAILQ_FIRST(&hmp->transq)) != NULL &&
		    trans->sync_tid < scan->sync_tid &&
		    (scan->flags & HAMMER2_TRANS_ISFLUSH)) {
			wakeup(&scan->sync_tid);
		}
	}
	hammer2_voldata_unlock(hmp, 0);

	trans->hmp = NULL;
}
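
/*
 * Illustrative sketch only (editorial, not part of the driver): how a
 * modifying operation might bracket its work with the transaction API
 * above.  The function and inode names here are hypothetical; only
 * hammer2_trans_init() and hammer2_trans_done() are real.
 */
#if 0
static
int
example_modifying_op(hammer2_mount_t *hmp, hammer2_inode_t *ip)
{
	hammer2_trans_t trans;

	/*
	 * A normal (non-flush) transaction.  It may stall in
	 * hammer2_trans_init() if it lands behind a queued flush,
	 * then runs concurrently with that flush once unblocked.
	 */
	hammer2_trans_init(&trans, hmp, ip, 0);
	/* ... modify chains under trans.sync_tid ... */
	hammer2_trans_done(&trans);

	return (0);
}
#endif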
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point (sync_tid), propagating parent chain modifications
 * and mirror_tid updates back up as needed.  Since we are recursing downward
 * we do not have to deal with the complexities of multi-homed chains (chains
 * with multiple parents).
 *
 * Caller must have interlocked against any non-flush-related modifying
 * operations in progress whose modify_tid values are less than or equal
 * to the passed sync_tid.
 *
 * Caller must have already vetted synchronization points to ensure they
 * are properly flushed.  Only snapshots and cluster flushes can create
 * these sorts of synchronization points.
 *
 * This routine can be called from several places but the most important
 * is from the hammer2_vop_reclaim() function.  We want to try to completely
 * clean out the inode structure to prevent disconnected inodes from
 * building up and blowing out the kmalloc pool.  However, it is not actually
 * necessary to flush reclaimed inodes to maintain HAMMER2's crash recovery
 * capability.
 *
 * chain is locked on call and will remain locked on return.  If a flush
 * occurred, the chain's MOVED bit will be set indicating that its parent
 * (which is not part of the flush) should be updated.
 */
void
hammer2_chain_flush(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_t *scan;
	hammer2_chain_core_t *core;
	hammer2_flush_info_t info;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flush_list);
	info.hmp = trans->hmp;
	info.trans = trans;
	info.sync_tid = trans->sync_tid;
	info.mirror_tid = 0;

	core = chain->core;

	for (;;) {
		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave MOVED set for these chains, which will be
		 * handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flush_list, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_chain_flush(trans, scan);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}

		/*
		 * Flush pass1 on root.
		 */
		info.diddeferral = 0;
		hammer2_chain_flush_core(&info, chain);
#if FLUSH_DEBUG
		kprintf("flush_core_done parent=<base> chain=%p.%d %08x\n",
			chain, chain->bref.type, chain->flags);
#endif

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flush_list))
			break;
	}
}
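
/*
 * Illustrative sketch only (editorial): how a flush might be driven from
 * the top.  The hypothetical caller obtains a flush transaction, flushes
 * a locked chain, and finishes the transaction; only the functions called
 * here are real.
 */
#if 0
static
void
example_flush_chain(hammer2_mount_t *hmp, hammer2_chain_t *chain)
{
	hammer2_trans_t trans;

	hammer2_trans_init(&trans, hmp, NULL, HAMMER2_TRANS_ISFLUSH);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_flush(&trans, chain);
	/* on return MOVED may be set; the parent must pick it up */
	hammer2_chain_unlock(chain);
	hammer2_trans_done(&trans);
}
#endif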
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and remains locked on return.  This function is keyed off of
 * the SUBMODIFIED bit but must make fine-grained choices based on the
 * synchronization point we are flushing to.
 *
 * If the flush accomplished any work the chain will be flagged MOVED,
 * indicating a copy-on-write propagation back up is required.
 * Deep sub-nodes may also have been entered onto the deferral list.
 * MOVED is never set on the volume root.
 *
 * NOTE: modify_tid is different from MODIFIED.  modify_tid is updated
 *	 only when a chain is specifically modified, and not updated
 *	 for copy-on-write propagations.  MODIFIED is set on any modification
 *	 including copy-on-write propagations.
 */
static void
hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain)
{
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_tid_t saved_sync;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_core_t *core;
	size_t psize;
	size_t boff;
	char *bdata;
	struct buf *bp;
	int error;
	int wasmodified;
	int diddeferral = 0;

	hmp = info->hmp;

#if FLUSH_DEBUG
	if (info->parent)
		kprintf("flush_core %p->%p.%d %08x (%s)\n",
			info->parent, chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));
	else
		kprintf("flush_core NULL->%p.%d %08x (%s)\n",
			chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));
#endif
	/*
	 * Ignore chains modified beyond the current flush point.  These
	 * will be treated as if they did not exist.
	 */
	if (chain->modify_tid > info->sync_tid)
		return;

	/*
	 * Deleted chains which have not been destroyed must be retained,
	 * and we probably have to recurse to clean-up any sub-trees.
	 * However, restricted flushes can stop processing here because
	 * the chain cleanup will be handled by a later normal flush.
	 *
	 * The MODIFIED bit can likely be cleared in this situation and we
	 * will do so later on in this procedure.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (trans->flags & HAMMER2_TRANS_RESTRICTED)
			return;
	}
	saved_sync = info->sync_tid;
	core = chain->core;

	/*
	 * If SUBMODIFIED is set we recurse the flush and adjust the
	 * blockrefs accordingly.
	 *
	 * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
	 *	 finishing in the face of filesystem activity.
	 */
	if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
		hammer2_chain_t *saved_parent;
		hammer2_tid_t saved_mirror;

		/*
		 * Clear SUBMODIFIED to catch races.  Note that any child
		 * with MODIFIED, DELETED, or MOVED set during Scan2, after
		 * it processes the child, will cause SUBMODIFIED to be
		 * re-set.  If a deferred child still has to be flushed,
		 * SUBMODIFIED will wind up being set again (for next time),
		 * but this does not stop us from synchronizing block updates
		 * which have already occurred.
		 *
		 * We don't want to set our chain to MODIFIED gratuitously.
		 *
		 * We need an extra ref on chain because we are going to
		 * release its lock temporarily in our child loop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
		hammer2_chain_ref(chain);

		/*
		 * Run two passes.  The first pass handles MODIFIED and
		 * SUBMODIFIED chains and recurses while the second pass
		 * handles MOVED chains on the way back up.
		 *
		 * If the stack gets too deep we defer scan1, but must
		 * be sure to still run scan2 if on the next loop the
		 * deferred chain has been flushed and now needs MOVED
		 * handling on the way back up.
		 *
		 * Scan1 is recursive.
		 *
		 * NOTE: The act of handling a modified/submodified chain can
		 *	 cause the MOVED flag to be set.  It can also be set
		 *	 via hammer2_chain_delete() and in other situations.
		 *
		 * NOTE: RB_SCAN() must be used instead of RB_FOREACH()
		 *	 because children can be physically removed during
		 *	 the scan.
		 */
		saved_parent = info->parent;
		saved_mirror = info->mirror_tid;
		info->parent = chain;
		info->mirror_tid = chain->bref.mirror_tid;

		if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
			if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
				hammer2_chain_ref(chain);
				TAILQ_INSERT_TAIL(&info->flush_list,
						  chain, flush_node);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_DEFERRED);
			}
			diddeferral = 1;
		} else {
			info->diddeferral = 0;
			spin_lock(&core->cst.spin);
			RB_SCAN(hammer2_chain_tree, &chain->core->rbtree,
				NULL, hammer2_chain_flush_scan1, info);
			spin_unlock(&core->cst.spin);
			diddeferral += info->diddeferral;
		}

		/*
		 * Handle successfully flushed children who are in the MOVED
		 * state on the way back up the recursion.  This can have
		 * the side-effect of clearing MOVED.
		 *
		 * We execute this even if there were deferrals to try to
		 * keep the chain topology cleaner.
		 *
		 * Scan2 is non-recursive.
		 */
		if (diddeferral) {
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		} else {
#if FLUSH_DEBUG
			kprintf("scan2_start parent %p %08x\n",
				chain, chain->flags);
#endif
			spin_lock(&core->cst.spin);
			RB_SCAN(hammer2_chain_tree, &core->rbtree,
				NULL, hammer2_chain_flush_scan2, info);
			spin_unlock(&core->cst.spin);
#if FLUSH_DEBUG
			kprintf("scan2_stop parent %p %08x\n",
				chain, chain->flags);
#endif
		}
		chain->bref.mirror_tid = info->mirror_tid;
		info->mirror_tid = saved_mirror;
		info->parent = saved_parent;
		hammer2_chain_drop(chain);
	}
	/*
	 * Restore sync_tid in case it was restricted by a delete/duplicate.
	 */
	info->sync_tid = saved_sync;

	/*
	 * Rollup diddeferral for caller.  Note direct assignment, not +=.
	 */
	info->diddeferral = diddeferral;

	/*
	 * Do not flush chain if there were any deferrals.  It will be
	 * retried later after the deferrals are independently handled.
	 */
	if (diddeferral) {
		if (hammer2_debug & 0x0008) {
			kprintf("%*.*s} %p/%d %04x (deferred)",
				info->depth, info->depth, "",
				chain, chain->refs, chain->flags);
		}
		return;
	}

	/*
	 * If we encounter a deleted chain within our flush we can clear
	 * the MODIFIED bit and avoid flushing it whether it has been
	 * destroyed or not.  We must make sure that the chain is flagged
	 * MOVED in this situation so the parent picks up the deletion.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp) {
				if (chain->bytes == chain->bp->b_bufsize)
					chain->bp->b_flags |= B_INVAL|B_RELBUF;
			}
			if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
				hammer2_chain_ref(chain);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_MOVED);
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
#if 0
	if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    (trans->flags & HAMMER2_TRANS_RESTRICTED) == 0) {
		/*
		 * Throw-away the MODIFIED flag
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp) {
				if (chain->bytes == chain->bp->b_bufsize)
					chain->bp->b_flags |= B_INVAL|B_RELBUF;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
#endif

	/*
	 * A degenerate flush might not have flushed anything and thus not
	 * processed modified blocks on the way back up.  Detect the case.
	 *
	 * Note that MOVED can be set without MODIFIED being set due to
	 * a deletion, in which case it is handled by Scan2 later on.
	 *
	 * Both bits can be set along with DELETED if data was modified
	 * within the synchronization zone and the chain was then deleted
	 * beyond the zone, in which case we still have to flush for
	 * synchronization point consistency.  Otherwise, though, DELETED
	 * and MODIFIED are treated as separate flags.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
		return;
	/*
	 * Issue flush.
	 *
	 * A DESTROYED node that reaches this point must be flushed for
	 * synchronization point consistency.
	 */

	/*
	 * Update mirror_tid, clear MODIFIED, and set MOVED.
	 *
	 * The caller will update the parent's reference to this chain
	 * by testing MOVED as long as the modification was in-bounds.
	 *
	 * MOVED is never set on the volume root as there is no parent
	 * to adjust.
	 */
	if (chain->bref.mirror_tid < info->sync_tid)
		chain->bref.mirror_tid = info->sync_tid;
	wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
	if (chain == &hmp->vchain)
		kprintf("(FLUSHED VOLUME HEADER)\n");
	if (chain == &hmp->fchain)
		kprintf("(FLUSHED FREEMAP HEADER)\n");
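
	/*
	 * Editorial note on ref accounting: both the MODIFIED and MOVED
	 * flags hold a reference on the chain.  Below, the ref inherited
	 * from clearing MODIFIED is either dropped (when MOVED is already
	 * set, or when the chain is a root with no parent to update) or
	 * is carried over into the MOVED flag being set.
	 */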
	if ((chain->flags & HAMMER2_CHAIN_MOVED) ||
	    chain == &hmp->vchain ||
	    chain == &hmp->fchain) {
		/*
		 * Drop the ref from the MODIFIED bit we cleared.
		 */
		if (wasmodified)
			hammer2_chain_drop(chain);
	} else {
		/*
		 * If we were MODIFIED we inherit the ref from clearing
		 * that bit, otherwise we need another ref.
		 */
		if (wasmodified == 0)
			hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}

	/*
	 * If this is part of a recursive flush we can go ahead and write
	 * out the buffer cache buffer and pass a new bref back up the chain
	 * via the MOVED bit.
	 *
	 * Volume headers are NOT flushed here as they require special
	 * processing.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_FREEMAP:
		hammer2_modify_volume(hmp);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		/*
		 * We should flush the free block table before we calculate
		 * CRCs and copy voldata -> volsync.
		 *
		 * To prevent SMP races, fchain must remain locked until
		 * voldata is copied to volsync.
		 */
		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if (hmp->fchain.flags & (HAMMER2_CHAIN_MODIFIED |
					 HAMMER2_CHAIN_SUBMODIFIED)) {
			/* this will modify vchain as a side effect */
			hammer2_chain_flush(info->trans, &hmp->fchain);
		}

		/*
		 * The volume header is flushed manually by the syncer, not
		 * here.  All we do is adjust the CRCs.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		kprintf("volume header mirror_tid %jd\n",
			hmp->voldata.mirror_tid);

		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC1_OFF,
				HAMMER2_VOLUME_ICRC1_SIZE);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC0_OFF,
				HAMMER2_VOLUME_ICRC0_SIZE);
		hmp->voldata.icrc_volheader =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRCVH_OFF,
				HAMMER2_VOLUME_ICRCVH_SIZE);
		hmp->volsync = hmp->voldata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
		hammer2_chain_unlock(&hmp->fchain);
		break;
	case HAMMER2_BREF_TYPE_DATA:
		/*
		 * Data elements have already been flushed via the logical
		 * file buffer cache.  Their hash was set in the bref by
		 * the vop_write code.
		 *
		 * Make sure any device buffer(s) have been flushed out here
		 * (there aren't usually any to flush).
		 */
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = chain->bref.data_off & ~pmask;
		boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
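
		/*
		 * Editorial note: the low bits of data_off encode a size
		 * radix rather than an offset (HAMMER2_OFF_MASK strips
		 * them), so boff is the chain's byte offset within the
		 * device block while pbase is the device block's base
		 * offset.  E.g. with a hypothetical psize of 0x10000,
		 * pmask is 0xffff and pbase rounds data_off down to a
		 * 64KB boundary.
		 */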
		bp = getblk(hmp->devvp, pbase, psize, GETBLK_NOWAIT, 0);
		if (bp) {
			if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
			    (B_CACHE | B_DIRTY)) {
				cluster_awrite(bp);
			} else {
				bp->b_flags |= B_RELBUF;
				brelse(bp);
			}
		}
		break;
#if 0
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Indirect blocks may be in an INITIAL state.  Use the
		 * chain_lock() call to ensure that the buffer has been
		 * instantiated (even though it is already locked the buffer
		 * might not have been instantiated).
		 *
		 * Only write the buffer out if it is dirty, it is possible
		 * the operating system had already written out the buffer.
		 */
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		KKASSERT(chain->bp != NULL);

		bp = chain->bp;
		if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
		    (bp->b_flags & B_DIRTY)) {
			bdwrite(chain->bp);
		} else {
			brelse(chain->bp);
		}
		chain->bp = NULL;
		chain->data = NULL;
		hammer2_chain_unlock(chain);
		break;
#endif
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Device-backed.  Buffer will be flushed by the sync
		 * code XXX.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
	default:
		/*
		 * Embedded elements have to be flushed out.
		 * (Basically just BREF_TYPE_INODE).
		 */
		KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		bref = &chain->bref;

		KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
		KKASSERT(HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_ISCSI32 ||
			 HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_FREEMAP);
		/*
		 * The data is embedded, we have to acquire the
		 * buffer cache buffer and copy the data into it.
		 */
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = bref->data_off & ~pmask;
		boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);

		/*
		 * The getblk() optimization can only be used if the
		 * physical block size matches the request.
		 */
		error = bread(hmp->devvp, pbase, psize, &bp);
		KKASSERT(error == 0);

		bdata = (char *)bp->b_data + boff;

		/*
		 * Copy the data to the buffer, mark the buffer
		 * dirty, and convert the chain to unmodified.
		 */
		bcopy(chain->data, bdata, chain->bytes);
		bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		bp = NULL;

		switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
		case HAMMER2_CHECK_FREEMAP:
			chain->bref.check.freemap.icrc32 =
				hammer2_icrc32(chain->data, chain->bytes);
			break;
		case HAMMER2_CHECK_ISCSI32:
			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
			break;
		default:
			panic("hammer2_chain_flush_core: bad crc type");
			break;	/* NOT REACHED */
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			++hammer2_iod_meta_write;
		else
			++hammer2_iod_indr_write;
	}
}
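
/*
 * Illustrative sketch only (editorial): verifying an embedded check code
 * on read would be the inverse of the bref.check update above.  This
 * helper is hypothetical and not part of this file.
 */
#if 0
static
int
example_verify_check(hammer2_chain_t *chain)
{
	uint32_t icrc = hammer2_icrc32(chain->data, chain->bytes);

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_FREEMAP:
		return (icrc == chain->bref.check.freemap.icrc32);
	case HAMMER2_CHECK_ISCSI32:
		return (icrc == chain->bref.check.iscsi32.value);
	default:
		return (0);
	}
}
#endif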
/*
 * Flush helper scan1 (recursive)
 *
 * Flushes the children of the caller's chain (parent) and updates
 * the blockref, restricted by sync_tid.
 *
 * Ripouts during the loop should not cause any problems.  Because we are
 * flushing to a synchronization point, modification races will occur after
 * sync_tid and do not have to be flushed anyway.
 *
 * It is also ok if the parent is chain_duplicate()'d while unlocked because
 * the delete/duplication will install a delete_tid that is still larger than
 * our current sync_tid.
 */
static int
hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_t *parent = info->parent;
	/*hammer2_mount_t *hmp = info->hmp;*/
	int diddeferral;

	/*
	 * We should only need to recurse if SUBMODIFIED is set, but as
	 * a safety we also recurse if MODIFIED is set.
	 *
	 * Return early if neither bit is set.  We must re-assert the
	 * SUBMODIFIED flag in the parent if any child covered by the
	 * parent (via delete_tid) is skipped.
	 */
	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}

	hammer2_chain_ref(child);
	spin_unlock(&parent->core->cst.spin);

	/*
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  Re-check the flags before
	 * continuing.
	 */
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}

	/*
	 * The DESTROYED flag can only be initially set on an unreferenced
	 * deleted inode and will propagate downward via the mechanic below.
	 * Such inode chains have been deleted for good and should no longer
	 * be subject to delete/duplication.
	 *
	 * This optimization allows the inode reclaim (destroy unlinked file
	 * on vnode reclamation after last close) to be flagged by just
	 * setting HAMMER2_CHAIN_DESTROYED at the top level, which then
	 * causes the chains to be terminated and related buffers to be
	 * invalidated and not flushed out.
	 *
	 * We have to be careful not to propagate the DESTROYED flag if
	 * the destruction occurred after our flush sync_tid.
	 */
	if ((parent->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (child->flags & HAMMER2_CHAIN_DELETED) &&
	    (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROYED |
					      HAMMER2_CHAIN_SUBMODIFIED);
	}

	/*
	 * Recurse and collect deferral data.
	 */
	diddeferral = info->diddeferral;
	++info->depth;
	hammer2_chain_flush_core(info, child);
#if FLUSH_DEBUG
	kprintf("flush_core_done parent=%p flags=%08x child=%p.%d %08x\n",
		parent, parent->flags, child, child->bref.type, child->flags);
#endif
	--info->depth;
	info->diddeferral += diddeferral;

	if (child->flags & HAMMER2_CHAIN_SUBMODIFIED)
		atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);

	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);

	spin_lock(&parent->core->cst.spin);
	return (0);
}
/*
 * Flush helper scan2 (non-recursive)
 *
 * This pass on a chain's children propagates any MOVED or DELETED
 * elements back up the chain towards the root after those elements have
 * been fully flushed.  Unlike scan1, this function is NOT recursive and
 * the parent remains locked across the entire scan.
 *
 * NOTE! We must re-set SUBMODIFIED on the parent(s) as appropriate, and
 *	 due to the above conditions it is possible to do this and still
 *	 have some children flagged MOVED depending on the synchronization.
 *
 * NOTE! A deletion is a visibility issue; there can still be references to
 *	 deleted elements (for example, to an unlinked file which is still
 *	 open), and there can also be multiple chains pointing to the same
 *	 bref where some are deleted and some are not (for example due to
 *	 a rename).  So a chain marked for deletion is basically considered
 *	 to be live until it is explicitly destroyed or until its ref-count
 *	 reaches zero (also implying that MOVED and MODIFIED are clear).
 */
static int
hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;
	hammer2_chain_core_t *above = child->above;
	hammer2_mount_t *hmp = info->hmp;
	hammer2_trans_t *trans = info->trans;
	hammer2_blockref_t *base;
	int count;

	/*
	 * Inodes with stale children that have been converted to DIRECTDATA
	 * mode (file extension or hardlink conversion typically) need to
	 * be skipped right now before we start messing with a non-existent
	 * block table.
	 */
#if 0
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)) {
#if FLUSH_DEBUG
		kprintf("B");
#endif
		goto finalize;
	}
#endif
	/*
	 * Ignore children created after our flush point, treating them as
	 * if they did not exist.  These children will not cause the parent
	 * to be updated.
	 *
	 * When we encounter such children and the parent chain has not been
	 * deleted, delete/duplicated, or delete/duplicated-for-move, then
	 * the parent may be used to funnel through several flush points.
	 * We must re-set the SUBMODIFIED flag in the parent to ensure that
	 * those flushes have visibility.  A simple test of delete_tid
	 * suffices to determine if the parent spans beyond our current
	 * flush.
	 */
	if (child->modify_tid > trans->sync_tid) {
#if FLUSH_DEBUG
		kprintf("E");
#endif
		goto finalize;
	}

	/*
	 * Ignore children which have not changed.  The parent's block table
	 * is already correct.
	 */
	if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
#if FLUSH_DEBUG
		kprintf("D");
#endif
		goto finalize;
	}

	hammer2_chain_ref(child);
	spin_unlock(&above->cst.spin);

	/*
	 * The MOVED bit implies an additional reference which prevents
	 * the child from being destroyed out from under our operation
	 * so we can lock the child safely without worrying about it
	 * getting ripped up (?).
	 *
	 * We can only update parents where child->parent matches.  The
	 * child->parent link will migrate along the chain but the flush
	 * order must be enforced absolutely.  Parents reflushed after the
	 * child has passed them by should skip due to the modify_tid test.
	 */
	hammer2_chain_lock(child, HAMMER2_RESOLVE_NEVER);
	/*
	 * The parent's blockref to the child must be deleted or updated.
	 *
	 * This point is not reached on successful DESTROYED optimizations
	 * but can be reached on recursive deletions and restricted flushes.
	 *
	 * Because flushes are ordered we do not have to make a
	 * modify/duplicate of indirect blocks.  That is, the flush
	 * code does not have to kmalloc or duplicate anything.  We
	 * can adjust the indirect block table in-place and reuse the
	 * chain.  It IS possible that the chain has already been duplicated
	 * or may wind up being duplicated on-the-fly by modifying code
	 * on the frontend.  We simply use the original and ignore such
	 * chains.  However, it does mean we can't clear the MOVED bit.
	 *
	 * XXX recursive deletions not optimized.
	 */
	hammer2_chain_modify(trans, &parent,
			     HAMMER2_MODIFY_NO_MODIFY_TID |
			     HAMMER2_MODIFY_ASSERTNOCOPY);

	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * XXX Should assert that OPFLAG_DIRECTDATA is 0 once we
		 * properly duplicate the inode headers and do proper flush
		 * range checks (all the children should be beyond the flush
		 * point).  For now just don't sync the non-applicable
		 * children.
		 *
		 * XXX Can also occur due to hardlink consolidation.  We
		 * set OPFLAG_DIRECTDATA to prevent the indirect and data
		 * blocks from syncing to the hardlink pointer.
		 */
#if 0
		KKASSERT((parent->data->ipdata.op_flags &
			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
#endif
#if 0
		if (parent->data->ipdata.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			base = NULL;
		} else
#endif
		{
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		if (parent->data) {
			base = &parent->data->npdata[0];
		} else {
			base = NULL;
			KKASSERT(child->flags & HAMMER2_CHAIN_DELETED);
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &parent->data->npdata[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		base = NULL;
		count = 0;
		panic("hammer2_chain_flush_scan2: "
		      "unrecognized blockref type: %d",
		      parent->bref.type);
	}
	/*
	 * Update the parent's blockref table and propagate mirror_tid.
	 *
	 * NOTE! Children with modify_tid's beyond our flush point are
	 *	 considered to not exist for the purposes of updating the
	 *	 parent's blockref array.
	 *
	 * NOTE! Updates to a parent's blockref table do not adjust the
	 *	 parent's bref.modify_tid, only its bref.mirror_tid.
	 */
	KKASSERT(child->index >= 0);
	if (child->delete_tid <= trans->sync_tid) {
		if (base) {
			KKASSERT(child->index < count);
			bzero(&base[child->index], sizeof(child->bref));
		}
		if (info->mirror_tid < child->delete_tid)
			info->mirror_tid = child->delete_tid;
	} else {
		if (base) {
			KKASSERT(child->index < count);
			base[child->index] = child->bref;
		}
		if (info->mirror_tid < child->modify_tid)
			info->mirror_tid = child->modify_tid;
	}

	if (info->mirror_tid < child->bref.mirror_tid) {
		info->mirror_tid = child->bref.mirror_tid;
	}
	if ((parent->bref.type == HAMMER2_BREF_TYPE_VOLUME ||
	     parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP) &&
	    hmp->voldata.mirror_tid < child->bref.mirror_tid) {
		hmp->voldata.mirror_tid = child->bref.mirror_tid;
	}
	/*
	 * When can we safely clear the MOVED flag?  Flushes down duplicate
	 * paths can occur out of order, for example if an inode is moved
	 * as part of a hardlink consolidation or if an inode is moved into
	 * an indirect block indexed before the inode.
	 *
	 * Only clear MOVED once all possible parents have been flushed.
	 */
	if (child->flags & HAMMER2_CHAIN_MOVED) {
		hammer2_chain_t *scan;
		int ok = 1;

		spin_lock(&above->cst.spin);
		for (scan = above->first_parent;
		     scan;
		     scan = scan->next_parent) {
			/*
			 * XXX weird code also checked at the top of scan2,
			 * I would like to fix this by detaching the core
			 * on initial hardlink consolidation (1->2 nlinks).
			 */
#if 0
			if (scan->bref.type == HAMMER2_BREF_TYPE_INODE &&
			    (scan->data->ipdata.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA)) {
				continue;
			}
#endif
			if (scan->flags & HAMMER2_CHAIN_SUBMODIFIED) {
				ok = 0;
				break;
			}
		}
		spin_unlock(&above->cst.spin);
		if (ok) {
			atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
			hammer2_chain_drop(child);	/* flag */
		}
	}

	/*
	 * Unlock the child.  This can wind up dropping the child's
	 * last ref, removing it from the parent's RB tree, and deallocating
	 * the structure.  The RB_SCAN() our caller is doing handles the
	 * situation.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);
	spin_lock(&above->cst.spin);
#if FLUSH_DEBUG
	kprintf("F");
#endif

	/*
	 * The parent cleared SUBMODIFIED prior to the scan.  If the child
	 * still requires a flush (possibly due to being outside the current
	 * synchronization zone), we must re-set SUBMODIFIED on the way back
	 * up.
	 */
finalize:
#if FLUSH_DEBUG
	kprintf("G child %p %08x\n", child, child->flags);
#endif
	return (0);
}