hammer2 - Add kernel-thread-based async bulk free
[dragonfly.git] / sys / vfs / hammer2 / hammer2_synchro.c
blob c7f9079c65acee69f9ce39d84c831e9780e3cd45
1 /*
2 * Copyright (c) 2015-2017 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 * This module implements the cluster synchronizer. Basically the way
36 * it works is that a thread is created for each cluster node in a PFS.
37 * This thread is responsible for synchronizing the current node using
38 * data from other nodes.
40 * Any out of sync master or slave can get back into synchronization as
41 * long as a quorum of masters agree on the update_tid. If a quorum is
42 * not available it may still be possible to synchronize to the highest
43 * available update_tid as a way of trying to catch up as much as possible
44 * until a quorum is available.
46 * If no quorum is possible (which can happen even if all masters are
47 * available, if the update_tid does not match), then manual intervention
48 * may be required to resolve discrepancies.
50 #include "hammer2.h"
/*
 * Deferred-inode node: one entry in a singly-linked LIFO of inodes whose
 * recursive synchronization has been postponed (sync cannot recurse
 * depth-first while the XOP scan is still running in node threads).
 * Holds a ref on the inode (dropped when the entry is processed/freed).
 */
52 typedef struct hammer2_deferred_ip {
53 struct hammer2_deferred_ip *next;
54 hammer2_inode_t *ip;
55 } hammer2_deferred_ip_t;
/*
 * Head of the deferral LIFO plus a count used to cap its growth
 * (the sync loop stops adding entries past a limit and returns EAGAIN).
 */
57 typedef struct hammer2_deferred_list {
58 hammer2_deferred_ip_t *base;
59 int count;
60 } hammer2_deferred_list_t;
/* Enable the kprintf tracing in sync_insert/destroy/replace (gated on hammer2_debug). */
63 #define HAMMER2_SYNCHRO_DEBUG 1
65 static int hammer2_sync_slaves(hammer2_thread_t *thr, hammer2_inode_t *ip,
66 hammer2_deferred_list_t *list, int isroot);
67 #if 0
68 static void hammer2_update_pfs_status(hammer2_thread_t *thr, uint32_t flags);
/*
 * NOTE(review): the statement fragment below is stray leftover code inside
 * this #if 0 block (it duplicates a call made in hammer2_sync_slaves); it is
 * never compiled. Verify against the repository before removing.
 */
69 nerror = hammer2_sync_insert(
70 thr, &parent, &chain,
71 focus->bref.modify_tid,
72 idx, focus);
73 #endif
74 static int hammer2_sync_insert(hammer2_thread_t *thr,
75 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
76 hammer2_tid_t modify_tid, int idx,
77 hammer2_chain_t *focus);
78 static int hammer2_sync_destroy(hammer2_thread_t *thr,
79 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
80 hammer2_tid_t mtid, int idx);
81 static int hammer2_sync_replace(hammer2_thread_t *thr,
82 hammer2_chain_t *parent, hammer2_chain_t *chain,
83 hammer2_tid_t mtid, int idx,
84 hammer2_chain_t *focus, int isroot);
86 /****************************************************************************
87 * HAMMER2 SYNC THREADS *
88 ****************************************************************************/
90 * Primary management thread for an element of a node. A thread will exist
91 * for each element requiring management.
93 * No management threads are needed for the SPMP or for any PMP with only
94 * a single MASTER.
96 * On the SPMP - handles bulkfree and dedup operations
97 * On a PFS - handles remastering and synchronization
/*
 * Per-cluster-node synchronization thread.
 *
 * Loops forever: first processes control flags on thr->flags
 * (STOP/FREEZE/UNFREEZE/FROZEN/REMASTER) using compare-and-set so flag
 * transitions race safely with other threads poking thr->flags, then runs
 * a synchronization pass over the PFS root via hammer2_sync_slaves(),
 * draining the deferral LIFO bottom-up, and finally sleeps until signalled
 * or a 5-second poll timeout.
 *
 * arg is the hammer2_thread_t for this node.  Does not return a value;
 * on STOP it signals HAMMER2_THREAD_STOPPED and exits.
 *
 * NOTE(review): this chunk is a mangled web extract — lines that contained
 * only braces or comment delimiters were dropped, so block boundaries below
 * must be confirmed against the repository copy.
 */
99 void
100 hammer2_primary_sync_thread(void *arg)
102 hammer2_thread_t *thr = arg;
103 hammer2_pfs_t *pmp;
104 hammer2_deferred_list_t list;
105 hammer2_deferred_ip_t *defer;
106 int error;
107 uint32_t flags;
108 uint32_t nflags;
110 pmp = thr->pmp;
111 bzero(&list, sizeof(list));
113 for (;;) {
/* Snapshot flags; cpu_ccfence prevents the compiler caching thr->flags. */
114 flags = thr->flags;
115 cpu_ccfence();
118 * Handle stop request
120 if (flags & HAMMER2_THREAD_STOP)
121 break;
124 * Handle freeze request
/* CAS failure means someone changed flags concurrently; retry the loop. */
126 if (flags & HAMMER2_THREAD_FREEZE) {
127 nflags = (flags & ~(HAMMER2_THREAD_FREEZE |
128 HAMMER2_THREAD_WAITING)) |
129 HAMMER2_THREAD_FROZEN;
130 if (!atomic_cmpset_int(&thr->flags, flags, nflags))
131 continue;
132 if (flags & HAMMER2_THREAD_WAITING)
133 wakeup(&thr->flags);
134 flags = nflags;
135 /* fall through */
138 if (flags & HAMMER2_THREAD_UNFREEZE) {
139 nflags = flags & ~(HAMMER2_THREAD_UNFREEZE |
140 HAMMER2_THREAD_FROZEN |
141 HAMMER2_THREAD_WAITING);
142 if (!atomic_cmpset_int(&thr->flags, flags, nflags))
143 continue;
144 if (flags & HAMMER2_THREAD_WAITING)
145 wakeup(&thr->flags);
146 flags = nflags;
147 /* fall through */
151 * Force idle if frozen until unfrozen or stopped.
/* Interlocked sleep: set WAITING via CAS so a waker knows to wakeup(). */
153 if (flags & HAMMER2_THREAD_FROZEN) {
154 nflags = flags | HAMMER2_THREAD_WAITING;
155 tsleep_interlock(&thr->flags, 0);
156 if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
157 tsleep(&thr->flags, PINTERLOCKED, "frozen", 0);
158 atomic_clear_int(&thr->flags,
159 HAMMER2_THREAD_WAITING);
161 continue;
165 * Reset state on REMASTER request
167 if (thr->flags & HAMMER2_THREAD_REMASTER) {
168 nflags = flags & ~HAMMER2_THREAD_REMASTER;
169 if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
170 /* reset state here */
172 continue;
176 * Synchronization scan.
178 if (hammer2_debug & 0x8000)
179 kprintf("sync_slaves pfs %s clindex %d\n",
180 pmp->pfs_names[thr->clindex], thr->clindex);
181 hammer2_trans_init(pmp, 0);
/* Hold a ref on the PFS root inode across the whole scan pass. */
183 hammer2_inode_ref(pmp->iroot);
185 for (;;) {
186 int didbreak = 0;
187 /* XXX lock synchronize pmp->modify_tid */
188 error = hammer2_sync_slaves(thr, pmp->iroot, &list, 1);
189 if (hammer2_debug & 0x8000) {
190 kprintf("sync_slaves error %d defer %p\n",
191 error, list.base);
/* EAGAIN means deferrals were queued; anything else ends the pass. */
193 if (error != EAGAIN)
194 break;
195 while ((defer = list.base) != NULL) {
196 hammer2_inode_t *nip;
198 nip = defer->ip;
199 error = hammer2_sync_slaves(thr, nip, &list,
200 (nip == pmp->iroot));
201 if (error && error != EAGAIN && error != ENOENT)
202 break;
203 if (hammer2_thr_break(thr)) {
204 didbreak = 1;
205 break;
209 * If no additional defers occurred we can
210 * remove this one, otherwise keep it on
211 * the list and retry once the additional
212 * defers have completed.
214 if (defer == list.base) {
215 --list.count;
216 list.base = defer->next;
217 kfree(defer, M_HAMMER2);
218 defer = NULL; /* safety */
219 hammer2_inode_drop(nip);
224 * If the thread is being remastered, frozen, or
225 * stopped, clean up any left-over deferals.
227 if (didbreak || (error && error != EAGAIN)) {
228 kprintf("didbreak\n");
229 while ((defer = list.base) != NULL) {
230 --list.count;
231 hammer2_inode_drop(defer->ip);
232 list.base = defer->next;
233 kfree(defer, M_HAMMER2);
/* Report the aborted pass as EINPROGRESS rather than success/EAGAIN. */
235 if (error == 0 || error == EAGAIN)
236 error = EINPROGRESS;
237 break;
241 hammer2_inode_drop(pmp->iroot);
242 hammer2_trans_done(pmp);
244 if (error && error != EINPROGRESS)
245 kprintf("hammer2_sync_slaves: error %d\n", error);
248 * Wait for event, or 5-second poll.
250 nflags = flags | HAMMER2_THREAD_WAITING;
251 tsleep_interlock(&thr->flags, 0);
252 if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
253 tsleep(&thr->flags, 0, "h2idle", hz * 5);
254 atomic_clear_int(&thr->flags, HAMMER2_THREAD_WAITING);
257 thr->td = NULL;
258 hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
259 /* thr structure can go invalid after this point */
/*
 * DEAD CODE (compiled out via #if 0): would print a human-readable summary
 * of the PFS cluster status flags whenever they change.  Kept for reference.
 */
262 #if 0
264 * Given a locked cluster created from pmp->iroot, update the PFS's
265 * reporting status.
267 static
268 void
269 hammer2_update_pfs_status(hammer2_thread_t *thr, uint32_t flags)
271 hammer2_pfs_t *pmp = thr->pmp;
/* Only the Z status flags participate; no-op if unchanged. */
273 flags &= HAMMER2_CLUSTER_ZFLAGS;
274 if (pmp->cluster_flags == flags)
275 return;
276 pmp->cluster_flags = flags;
278 kprintf("pfs %p", pmp);
279 if (flags & HAMMER2_CLUSTER_MSYNCED)
280 kprintf(" masters-all-good");
281 if (flags & HAMMER2_CLUSTER_SSYNCED)
282 kprintf(" slaves-all-good");
284 if (flags & HAMMER2_CLUSTER_WRHARD)
285 kprintf(" quorum/rw");
286 else if (flags & HAMMER2_CLUSTER_RDHARD)
287 kprintf(" quorum/ro");
289 if (flags & HAMMER2_CLUSTER_UNHARD)
290 kprintf(" out-of-sync-masters");
291 else if (flags & HAMMER2_CLUSTER_NOHARD)
292 kprintf(" no-masters-visible");
294 if (flags & HAMMER2_CLUSTER_WRSOFT)
295 kprintf(" soft/rw");
296 else if (flags & HAMMER2_CLUSTER_RDSOFT)
297 kprintf(" soft/ro");
299 if (flags & HAMMER2_CLUSTER_UNSOFT)
300 kprintf(" out-of-sync-slaves");
301 else if (flags & HAMMER2_CLUSTER_NOSOFT)
302 kprintf(" no-slaves-visible");
303 kprintf("\n");
305 #endif
/*
 * DEAD CODE (compiled out via #if 0): debug helper that would dump, per
 * cluster slot, the parent-chain and chain keys side by side, marking
 * invalid cluster items with "(I)".  Gated on (hammer2_debug & 1).
 */
307 #if 0
308 static
309 void
310 dumpcluster(const char *label,
311 hammer2_cluster_t *cparent, hammer2_cluster_t *cluster)
313 hammer2_chain_t *chain;
314 int i;
316 if ((hammer2_debug & 1) == 0)
317 return;
319 kprintf("%s\t", label);
320 KKASSERT(cparent->nchains == cluster->nchains);
321 for (i = 0; i < cparent->nchains; ++i) {
322 if (i)
323 kprintf("\t");
324 kprintf("%d ", i);
325 if ((chain = cparent->array[i].chain) != NULL) {
326 kprintf("%016jx%s ",
327 chain->bref.key,
328 ((cparent->array[i].flags &
329 HAMMER2_CITEM_INVALID) ? "(I)" : " ")
331 } else {
332 kprintf(" NULL %s ", " ");
334 if ((chain = cluster->array[i].chain) != NULL) {
335 kprintf("%016jx%s ",
336 chain->bref.key,
337 ((cluster->array[i].flags &
338 HAMMER2_CITEM_INVALID) ? "(I)" : " ")
340 } else {
341 kprintf(" NULL %s ", " ");
343 kprintf("\n");
346 #endif
349 * Each out of sync node sync-thread must issue an all-nodes XOP scan of
350 * the inode. This creates a multiplication effect since the XOP scan itself
351 * issues to all nodes. However, this is the only way we can safely
352 * synchronize nodes which might have disparate I/O bandwidths and the only
353 * way we can safely deal with stalled nodes.
/*
 * Synchronize this thread's cluster node (thr->clindex) for one inode.
 *
 * Algorithm visible here:
 *  1. Issue an ipcluster XOP excluding our own index to get the
 *     authoritative focus; compare its modify_tid against our local chain
 *     to decide whether any work is needed (want_update), remembering
 *     sync_tid for the final inode update.
 *  2. Issue a scanall XOP (again excluding our index) and walk it in
 *     lockstep with a local chain_lookup/chain_next iteration, merging
 *     the two sorted streams via hammer2_chain_cmp():
 *       n < 0  -> extraneous local chain: sync_destroy
 *       n == 0 -> same key; sync_replace if modify_tid differs
 *       n > 0  -> missing local chain: sync_insert
 *     Inode-type chains are not recursed into immediately; they are
 *     pushed onto the caller's LIFO deferral list (dodefer) because the
 *     XOP is still running in node threads.
 *  3. If deferrals were added, return EAGAIN so the caller processes them
 *     bottom-up and calls us again; otherwise re-resolve the inode and
 *     sync its meta-data/modify_tid with sync_replace (limited changes
 *     for PFS roots, per isroot).
 *
 * Returns 0 on success, EAGAIN when deferrals are pending, or an errno
 * from the XOP collection.
 *
 * NOTE(review): web extract dropped brace/comment-delimiter lines; the
 * missing "int" return-type line (orig. line 356) and block boundaries
 * must be confirmed against the repository copy.
 */
355 static
357 hammer2_sync_slaves(hammer2_thread_t *thr, hammer2_inode_t *ip,
358 hammer2_deferred_list_t *list, int isroot)
360 hammer2_xop_scanall_t *xop;
361 hammer2_chain_t *parent;
362 hammer2_chain_t *chain;
363 hammer2_pfs_t *pmp;
364 hammer2_key_t key_next;
365 hammer2_tid_t sync_tid;
366 int cache_index = -1;
367 int needrescan;
368 int want_update;
369 int error;
370 int nerror;
371 int idx;
372 int n;
374 pmp = ip->pmp;
375 idx = thr->clindex; /* cluster node we are responsible for */
376 needrescan = 0;
377 want_update = 0;
378 sync_tid = 0;
379 chain = NULL;
380 parent = NULL;
382 #if 0
384 * Nothing to do if all slaves are synchronized.
385 * Nothing to do if cluster not authoritatively readable.
387 if (pmp->cluster_flags & HAMMER2_CLUSTER_SSYNCED)
388 return(0);
389 if ((pmp->cluster_flags & HAMMER2_CLUSTER_RDHARD) == 0)
390 return(HAMMER2_ERROR_INCOMPLETE);
391 #endif
393 error = 0;
396 * Resolve the root inode of the PFS and determine if synchronization
397 * is needed by checking modify_tid.
399 * Retain the synchronization TID from the focus inode and use it
400 * later to synchronize the focus inode if/when the recursion
401 * succeeds.
404 hammer2_xop_ipcluster_t *xop2;
405 hammer2_chain_t *focus;
407 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
408 xop2 = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
409 hammer2_xop_start_except(&xop2->head, hammer2_xop_ipcluster,
410 idx);
411 hammer2_inode_unlock(ip);
412 error = hammer2_xop_collect(&xop2->head, 0);
413 if (error == 0 && (focus = xop2->head.cluster.focus) != NULL) {
414 sync_tid = focus->bref.modify_tid;
415 chain = hammer2_inode_chain_and_parent(ip, idx,
416 &parent,
417 HAMMER2_RESOLVE_ALWAYS |
418 HAMMER2_RESOLVE_SHARED);
/*
 * NOTE(review): chain is dereferenced here before the NULL check
 * below — verify hammer2_inode_chain_and_parent() cannot return
 * NULL on this path, or this is a latent NULL dereference.
 */
419 want_update = (chain->bref.modify_tid != sync_tid);
420 if (chain) {
421 hammer2_chain_unlock(chain);
422 hammer2_chain_drop(chain);
423 chain = NULL;
425 if (parent) {
426 hammer2_chain_unlock(parent);
427 hammer2_chain_drop(parent);
428 parent = NULL;
431 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
434 if (want_update == 0)
435 return(0);
438 * The inode is left unlocked during the scan. Issue a XOP
439 * that does *not* include our cluster index to iterate
440 * properly synchronized elements and resolve our cluster index
441 * against it.
443 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
444 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
445 xop->key_beg = HAMMER2_KEY_MIN;
446 xop->key_end = HAMMER2_KEY_MAX;
447 xop->resolve_flags = HAMMER2_RESOLVE_SHARED |
448 HAMMER2_RESOLVE_ALWAYS;
449 xop->lookup_flags = HAMMER2_LOOKUP_SHARED |
450 HAMMER2_LOOKUP_NODIRECT |
451 HAMMER2_LOOKUP_ALWAYS;
452 hammer2_xop_start_except(&xop->head, hammer2_xop_scanall, idx);
453 parent = hammer2_inode_chain(ip, idx,
454 HAMMER2_RESOLVE_ALWAYS |
455 HAMMER2_RESOLVE_SHARED);
456 hammer2_inode_unlock(ip);
/* Local iteration: first element of our node's chain under the inode. */
458 chain = hammer2_chain_lookup(&parent, &key_next,
459 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
460 &cache_index,
461 HAMMER2_LOOKUP_SHARED |
462 HAMMER2_LOOKUP_NODIRECT |
463 HAMMER2_LOOKUP_NODATA);
464 error = hammer2_xop_collect(&xop->head, 0);
465 if (hammer2_debug & 0x8000) {
466 kprintf("START_SCAN IP=%016jx chain=%p (%016jx)\n",
467 ip->meta.name_key, chain,
468 (chain ? chain->bref.key : -1));
/* Merge loop: advance local chain and/or XOP focus per comparison. */
471 for (;;) {
473 * We are done if our scan is done and the XOP scan is done.
474 * We are done if the XOP scan failed (that is, we don't
475 * have authoritative data to synchronize with).
477 int advance_local = 0;
478 int advance_xop = 0;
479 int dodefer = 0;
480 hammer2_chain_t *focus;
482 if (chain == NULL && error == ENOENT)
483 break;
484 if (error && error != ENOENT)
485 break;
488 * Compare
490 if (chain && error == ENOENT) {
492 * If we have local chains but the XOP scan is done,
493 * the chains need to be deleted.
495 n = -1;
496 focus = NULL;
497 } else if (chain == NULL) {
499 * If our local scan is done but the XOP scan is not,
500 * we need to create the missing chain(s).
502 n = 1;
503 focus = xop->head.cluster.focus;
504 } else {
506 * Otherwise compare to determine the action
507 * needed.
509 focus = xop->head.cluster.focus;
510 n = hammer2_chain_cmp(chain, focus);
514 * Take action based on comparison results.
516 if (n < 0) {
518 * Delete extranious local data. This will
519 * automatically advance the chain.
521 nerror = hammer2_sync_destroy(thr, &parent, &chain,
522 0, idx);
523 } else if (n == 0 && chain->bref.modify_tid !=
524 focus->bref.modify_tid) {
526 * Matching key but local data or meta-data requires
527 * updating. If we will recurse, we still need to
528 * update to compatible content first but we do not
529 * synchronize modify_tid until the entire recursion
530 * has completed successfully.
/*
 * Inode: replace with mtid 0 (defer final modify_tid until the
 * recursion completes); non-inode: stamp focus's modify_tid now.
 * NOTE(review): the mtid argument line for the first call (orig.
 * line 535) was dropped by the extraction.
 */
532 if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
533 nerror = hammer2_sync_replace(
534 thr, parent, chain,
536 idx, focus, 0);
537 dodefer = 1;
538 } else {
539 nerror = hammer2_sync_replace(
540 thr, parent, chain,
541 focus->bref.modify_tid,
542 idx, focus, 0);
544 advance_local = 1;
545 advance_xop = 1;
546 } else if (n == 0) {
548 * 100% match, advance both
550 advance_local = 1;
551 advance_xop = 1;
552 nerror = 0;
553 } else if (n > 0) {
555 * Insert missing local data.
557 * If we will recurse, we still need to update to
558 * compatible content first but we do not synchronize
559 * modify_tid until the entire recursion has
560 * completed successfully.
/* Same deferral pattern as replace; dropped mtid line orig. 565. */
562 if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
563 nerror = hammer2_sync_insert(
564 thr, &parent, &chain,
566 idx, focus);
567 dodefer = 2;
568 } else {
569 nerror = hammer2_sync_insert(
570 thr, &parent, &chain,
571 focus->bref.modify_tid,
572 idx, focus);
574 advance_local = 1;
575 advance_xop = 1;
579 * We cannot recurse depth-first because the XOP is still
580 * running in node threads for this scan. Create a placemarker
581 * by obtaining and record the hammer2_inode.
583 * We excluded our node from the XOP so we must temporarily
584 * add it to xop->head.cluster so it is properly incorporated
585 * into the inode.
587 * The deferral is pushed onto a LIFO list for bottom-up
588 * synchronization.
590 if (error == 0 && dodefer) {
591 hammer2_inode_t *nip;
592 hammer2_deferred_ip_t *defer;
594 KKASSERT(focus->bref.type == HAMMER2_BREF_TYPE_INODE);
596 defer = kmalloc(sizeof(*defer), M_HAMMER2,
597 M_WAITOK | M_ZERO);
598 KKASSERT(xop->head.cluster.array[idx].chain == NULL);
599 xop->head.cluster.array[idx].flags =
600 HAMMER2_CITEM_INVALID;
601 xop->head.cluster.array[idx].chain = chain;
602 nip = hammer2_inode_get(pmp, ip,
603 &xop->head.cluster, idx);
604 xop->head.cluster.array[idx].chain = NULL;
/* Keep a ref for the deferral entry; dropped when it is processed. */
606 hammer2_inode_ref(nip);
607 hammer2_inode_unlock(nip);
609 defer->next = list->base;
610 defer->ip = nip;
611 list->base = defer;
612 ++list->count;
613 needrescan = 1;
617 * If at least one deferral was added and the deferral
618 * list has grown too large, stop adding more. This
619 * will trigger an EAGAIN return.
621 if (needrescan && list->count > 1000)
622 break;
625 * Advancements for iteration.
627 if (advance_xop) {
628 error = hammer2_xop_collect(&xop->head, 0);
630 if (advance_local) {
631 chain = hammer2_chain_next(&parent, chain, &key_next,
632 key_next, HAMMER2_KEY_MAX,
633 &cache_index,
634 HAMMER2_LOOKUP_SHARED |
635 HAMMER2_LOOKUP_NODIRECT |
636 HAMMER2_LOOKUP_NODATA);
/* Scan done: release XOP and any chain/parent locks+refs still held. */
639 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
640 if (chain) {
641 hammer2_chain_unlock(chain);
642 hammer2_chain_drop(chain);
644 if (parent) {
645 hammer2_chain_unlock(parent);
646 hammer2_chain_drop(parent);
650 * If we added deferrals we want the caller to synchronize them
651 * and then call us again.
653 * NOTE: In this situation we do not yet want to synchronize our
654 * inode, setting the error code also has that effect.
656 if ((error == 0 || error == ENOENT) && needrescan)
657 error = EAGAIN;
660 * If no error occurred we can synchronize the inode meta-data
661 * and modify_tid. Only limited changes are made to PFSROOTs.
663 * XXX inode lock was lost
665 if (error == 0 || error == ENOENT) {
666 hammer2_xop_ipcluster_t *xop2;
667 hammer2_chain_t *focus;
669 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
670 xop2 = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
671 hammer2_xop_start_except(&xop2->head, hammer2_xop_ipcluster,
672 idx);
673 hammer2_inode_unlock(ip);
674 error = hammer2_xop_collect(&xop2->head, 0);
675 if (error == 0) {
676 focus = xop2->head.cluster.focus;
677 if (hammer2_debug & 0x8000) {
678 kprintf("syncthr: update inode %p (%s)\n",
679 focus,
680 (focus ? (char *)focus->data->
681 ipdata.filename :
682 "?"));
684 chain = hammer2_inode_chain_and_parent(ip, idx,
685 &parent,
686 HAMMER2_RESOLVE_ALWAYS |
687 HAMMER2_RESOLVE_SHARED);
689 KKASSERT(parent != NULL);
/* Finally stamp sync_tid (captured at step 1) onto our inode chain. */
690 nerror = hammer2_sync_replace(
691 thr, parent, chain,
692 sync_tid,
693 idx, focus, isroot);
694 hammer2_chain_unlock(chain);
695 hammer2_chain_drop(chain);
696 hammer2_chain_unlock(parent);
697 hammer2_chain_drop(parent);
698 /* XXX */
700 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
703 return error;
707 * Create a missing chain by copying the focus from another device.
709 * On entry *parentp and focus are both locked shared. The chain will be
710 * created and returned in *chainp also locked shared.
/*
 * Create a chain missing on our node by copying the focus chain from
 * another cluster node.
 *
 * On entry *parentp is locked shared and focus is stable.  The child in
 * *chainp (if any) is released, the parent is relocked exclusive for the
 * insertion, the new chain is created and populated from focus (bref
 * fields plus a type-dependent data-body copy), and on return both
 * *parentp and the new *chainp are locked shared again.  mtid becomes the
 * new chain's modify_tid.  Always returns 0.
 *
 * NOTE(review): web extract dropped brace/comment-delimiter lines (and
 * the "int" return-type line, orig. 713); confirm against the repository.
 */
712 static
714 hammer2_sync_insert(hammer2_thread_t *thr,
715 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
716 hammer2_tid_t mtid, int idx, hammer2_chain_t *focus)
718 hammer2_chain_t *chain;
719 hammer2_key_t dummy;
720 int cache_index = -1;
722 #if HAMMER2_SYNCHRO_DEBUG
723 if (hammer2_debug & 1)
724 kprintf("insert rec par=%p/%d.%016jx slave %d %d.%016jx mod=%016jx\n",
725 *parentp,
726 (*parentp)->bref.type,
727 (*parentp)->bref.key,
728 idx,
729 focus->bref.type, focus->bref.key, mtid);
730 #endif
733 * Parent requires an exclusive lock for the insertion.
734 * We must unlock the child to avoid deadlocks while
735 * relocking the parent.
737 if (*chainp) {
738 hammer2_chain_unlock(*chainp);
739 hammer2_chain_drop(*chainp);
740 *chainp = NULL;
742 hammer2_chain_unlock(*parentp);
743 hammer2_chain_lock(*parentp, HAMMER2_RESOLVE_ALWAYS);
746 * We must reissue the lookup to properly position (*parentp)
747 * for the insertion.
749 chain = hammer2_chain_lookup(parentp, &dummy,
750 focus->bref.key, focus->bref.key,
751 &cache_index,
752 HAMMER2_LOOKUP_NODIRECT |
753 HAMMER2_LOOKUP_ALWAYS);
/* The key must not already exist on our node — we are inserting it. */
754 KKASSERT(chain == NULL);
756 chain = NULL;
757 hammer2_chain_create(parentp, &chain,
758 thr->pmp, focus->bref.methods,
759 focus->bref.key, focus->bref.keybits,
760 focus->bref.type, focus->bytes,
761 mtid, 0, 0);
762 hammer2_chain_modify(chain, mtid, 0, 0);
765 * Copy focus to new chain
768 /* type already set */
769 chain->bref.methods = focus->bref.methods;
770 /* keybits already set */
771 chain->bref.vradix = focus->bref.vradix;
772 /* mirror_tid set by flush */
773 KKASSERT(chain->bref.modify_tid == mtid);
774 chain->bref.flags = focus->bref.flags;
775 /* key already present */
776 /* check code will be recalculated */
779 * Copy data body.
781 switch(chain->bref.type) {
782 case HAMMER2_BREF_TYPE_INODE:
/* Indirect inode: copy only the meta portion, not the block table. */
783 if ((focus->data->ipdata.meta.op_flags &
784 HAMMER2_OPFLAG_DIRECTDATA) == 0) {
785 /* do not copy block table */
786 bcopy(focus->data, chain->data,
787 offsetof(hammer2_inode_data_t, u));
788 break;
790 /* fall through copy whole thing */
791 case HAMMER2_BREF_TYPE_DATA:
792 bcopy(focus->data, chain->data, chain->bytes);
793 hammer2_chain_setcheck(chain, chain->data);
794 break;
795 case HAMMER2_BREF_TYPE_DIRENT:
797 * Directory entries embed data in the blockref.
799 if (chain->bytes) {
800 bcopy(focus->data, chain->data, chain->bytes);
801 hammer2_chain_setcheck(chain, chain->data);
802 } else {
803 chain->bref.check = focus->bref.check;
805 chain->bref.embed = focus->bref.embed;
806 break;
807 default:
808 KKASSERT(0);
809 break;
812 hammer2_chain_unlock(chain); /* unlock, leave ref */
813 *chainp = chain; /* will be returned locked */
816 * Avoid an ordering deadlock when relocking shared.
/* Re-acquire parent before child, both shared, matching entry state. */
818 hammer2_chain_unlock(*parentp);
819 hammer2_chain_lock(*parentp, HAMMER2_RESOLVE_SHARED |
820 HAMMER2_RESOLVE_ALWAYS);
821 hammer2_chain_lock(chain, HAMMER2_RESOLVE_SHARED |
822 HAMMER2_RESOLVE_ALWAYS);
824 return 0;
828 * Destroy an extranious chain.
830 * Both *parentp and *chainp are locked shared.
832 * On return, *chainp will be adjusted to point to the next element in the
833 * iteration and locked shared.
/*
 * Permanently delete an extraneous chain on our node (present locally but
 * absent from the authoritative XOP scan).
 *
 * On entry *parentp and *chainp are locked shared.  Both are relocked
 * exclusive for the delete (child with RESOLVE_NEVER to skip data I/O),
 * the chain is deleted permanently with mtid, and *chainp is re-pointed
 * at the next element >= save_key with the shared iteration flags so the
 * caller's merge loop continues seamlessly.  Always returns 0.
 *
 * NOTE(review): web extract dropped brace/comment-delimiter lines (and
 * the "int" return-type line, orig. 836); confirm against the repository.
 */
835 static
837 hammer2_sync_destroy(hammer2_thread_t *thr,
838 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
839 hammer2_tid_t mtid, int idx)
841 hammer2_chain_t *chain;
842 hammer2_key_t key_next;
843 hammer2_key_t save_key;
844 int cache_index = -1;
846 chain = *chainp;
848 #if HAMMER2_SYNCHRO_DEBUG
849 if (hammer2_debug & 1)
850 kprintf("destroy rec %p/%p slave %d %d.%016jx\n",
851 *parentp, chain,
852 idx, chain->bref.type, chain->bref.key);
853 #endif
/* Resume-point for the post-delete lookup: one past the deleted key. */
855 save_key = chain->bref.key;
856 if (save_key != HAMMER2_KEY_MAX)
857 ++save_key;
860 * Try to avoid unnecessary I/O.
862 * XXX accounting not propagated up properly. We might have to do
863 * a RESOLVE_MAYBE here and pass 0 for the flags.
865 hammer2_chain_unlock(chain); /* relock exclusive */
866 hammer2_chain_unlock(*parentp);
867 hammer2_chain_lock(*parentp, HAMMER2_RESOLVE_ALWAYS);
868 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER);
870 hammer2_chain_delete(*parentp, chain, mtid, HAMMER2_DELETE_PERMANENT);
871 hammer2_chain_unlock(chain);
872 hammer2_chain_drop(chain);
873 chain = NULL; /* safety */
875 hammer2_chain_unlock(*parentp); /* relock shared */
876 hammer2_chain_lock(*parentp, HAMMER2_RESOLVE_SHARED |
877 HAMMER2_RESOLVE_ALWAYS);
878 *chainp = hammer2_chain_lookup(parentp, &key_next,
879 save_key, HAMMER2_KEY_MAX,
880 &cache_index,
881 HAMMER2_LOOKUP_SHARED |
882 HAMMER2_LOOKUP_NODIRECT |
883 HAMMER2_LOOKUP_NODATA);
884 return 0;
888 * cparent is locked exclusively, with an extra ref, cluster is not locked.
889 * Replace element [i] in the cluster.
/*
 * Replace the content of our node's chain with the focus chain's content
 * (same key, differing modify_tid).
 *
 * The chain is relocked exclusive, resized if the byte counts differ,
 * marked modified with mtid (mtid == 0 defers the final modify_tid stamp
 * to the caller's recursion-complete pass), its bref fields are copied
 * from focus, and the data body is copied per bref type.  PFS root inodes
 * (isroot) get only a whitelisted subset of meta fields so the per-node
 * distinguishing fields survive.  Returns with the chain relocked shared.
 * Always returns 0.
 *
 * NOTE(review): web extract dropped brace/comment-delimiter lines (and
 * the "int" return-type line, orig. 892); confirm against the repository.
 */
891 static
893 hammer2_sync_replace(hammer2_thread_t *thr,
894 hammer2_chain_t *parent, hammer2_chain_t *chain,
895 hammer2_tid_t mtid, int idx,
896 hammer2_chain_t *focus, int isroot)
898 int nradix;
899 uint8_t otype;
901 #if HAMMER2_SYNCHRO_DEBUG
902 if (hammer2_debug & 1)
903 kprintf("replace rec %p slave %d %d.%016jx mod=%016jx\n",
904 chain,
905 idx,
906 focus->bref.type, focus->bref.key, mtid);
907 #endif
908 hammer2_chain_unlock(chain);
909 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
910 if (chain->bytes != focus->bytes) {
911 /* XXX what if compressed? */
/*
 * NOTE(review): radix is computed from chain->bytes (the OLD size),
 * yet the intent appears to be resizing to focus->bytes — verify
 * whether this should be hammer2_getradix(focus->bytes).
 */
912 nradix = hammer2_getradix(chain->bytes);
913 hammer2_chain_resize(chain, mtid, 0, nradix, 0);
915 hammer2_chain_modify(chain, mtid, 0, 0);
916 otype = chain->bref.type;
917 chain->bref.type = focus->bref.type;
918 chain->bref.methods = focus->bref.methods;
919 chain->bref.keybits = focus->bref.keybits;
920 chain->bref.vradix = focus->bref.vradix;
921 /* mirror_tid updated by flush */
922 KKASSERT(mtid == 0 || chain->bref.modify_tid == mtid);
923 chain->bref.flags = focus->bref.flags;
924 /* key already present */
925 /* check code will be recalculated */
926 chain->error = 0;
929 * Copy data body.
931 switch(chain->bref.type) {
932 case HAMMER2_BREF_TYPE_INODE:
934 * Special case PFSROOTs, only limited changes can be made
935 * since the meta-data contains miscellanious distinguishing
936 * fields.
938 if (isroot) {
939 chain->data->ipdata.meta.uflags =
940 focus->data->ipdata.meta.uflags;
941 chain->data->ipdata.meta.rmajor =
942 focus->data->ipdata.meta.rmajor;
943 chain->data->ipdata.meta.rminor =
944 focus->data->ipdata.meta.rminor;
945 chain->data->ipdata.meta.ctime =
946 focus->data->ipdata.meta.ctime;
947 chain->data->ipdata.meta.mtime =
948 focus->data->ipdata.meta.mtime;
949 chain->data->ipdata.meta.atime =
950 focus->data->ipdata.meta.atime;
951 /* not btime */
952 chain->data->ipdata.meta.uid =
953 focus->data->ipdata.meta.uid;
954 chain->data->ipdata.meta.gid =
955 focus->data->ipdata.meta.gid;
956 chain->data->ipdata.meta.mode =
957 focus->data->ipdata.meta.mode;
958 chain->data->ipdata.meta.ncopies =
959 focus->data->ipdata.meta.ncopies;
960 chain->data->ipdata.meta.comp_algo =
961 focus->data->ipdata.meta.comp_algo;
962 chain->data->ipdata.meta.check_algo =
963 focus->data->ipdata.meta.check_algo;
964 chain->data->ipdata.meta.data_quota =
965 focus->data->ipdata.meta.data_quota;
966 chain->data->ipdata.meta.inode_quota =
967 focus->data->ipdata.meta.inode_quota;
970 * last snapshot tid controls overwrite
972 if (chain->data->ipdata.meta.pfs_lsnap_tid <
973 focus->data->ipdata.meta.pfs_lsnap_tid) {
974 chain->data->ipdata.meta.pfs_lsnap_tid =
975 focus->data->ipdata.meta.pfs_lsnap_tid;
978 hammer2_chain_setcheck(chain, chain->data);
979 break;
983 * Normal replacement.
985 if ((focus->data->ipdata.meta.op_flags &
986 HAMMER2_OPFLAG_DIRECTDATA) == 0) {
988 * If DIRECTDATA is transitioning to 0 or the old
989 * chain is not an inode we have to initialize
990 * the block table.
992 if (otype != HAMMER2_BREF_TYPE_INODE ||
993 (chain->data->ipdata.meta.op_flags &
994 HAMMER2_OPFLAG_DIRECTDATA)) {
995 kprintf("chain inode trans away from dd\n");
996 bzero(&chain->data->ipdata.u,
997 sizeof(chain->data->ipdata.u));
999 bcopy(focus->data, chain->data,
1000 offsetof(hammer2_inode_data_t, u));
1001 /* XXX setcheck on inode should not be needed */
1002 hammer2_chain_setcheck(chain, chain->data);
1003 break;
1005 /* fall through */
1006 case HAMMER2_BREF_TYPE_DATA:
1007 bcopy(focus->data, chain->data, chain->bytes);
1008 hammer2_chain_setcheck(chain, chain->data);
1009 break;
1010 case HAMMER2_BREF_TYPE_DIRENT:
1012 * Directory entries embed data in the blockref.
1014 if (chain->bytes) {
1015 bcopy(focus->data, chain->data, chain->bytes);
1016 hammer2_chain_setcheck(chain, chain->data);
1017 } else {
1018 chain->bref.check = focus->bref.check;
1020 chain->bref.embed = focus->bref.embed;
1021 break;
1022 default:
1023 KKASSERT(0);
1024 break;
/* Return the chain to a shared lock for the caller's iteration. */
1027 hammer2_chain_unlock(chain);
1028 hammer2_chain_lock(chain, HAMMER2_RESOLVE_SHARED |
1029 HAMMER2_RESOLVE_MAYBE);
1031 return 0;