hammer2 - More involved refactoring of chain_repparent, cleanup
[dragonfly.git] / sys / vfs / hammer2 / hammer2_admin.c
/*
 * Copyright (c) 2015-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"
/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}
/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}
/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
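/*
 * NOTE: The signal/wait functions cooperate to avoid lost wakeups.  A
 *	 waiter publishes HAMMER2_THREAD_WAITING via cmpset while holding
 *	 the tsleep interlock; a signaller clears the bit in the same
 *	 cmpset that sets the requested flags and only then issues the
 *	 wakeup().  hammer2_thr_delete() below is a typical pairing: it
 *	 signals HAMMER2_THREAD_STOP and waits for HAMMER2_THREAD_STOPPED.
 */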
/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}
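/*
 * NOTE: A timo of 0 sleeps until one of the requested flags is set.  A
 *	 tsleep() timeout is translated to HAMMER2_ERROR_ETIMEDOUT so
 *	 callers see a hammer2 error code rather than a raw errno.
 */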
/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}
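/*
 * NOTE: Thread naming follows the arguments: repidx >= 0 produces
 *	 "<id>-<pfs>.<NN>" and pins the thread to cpu (repidx % ncpus),
 *	 a pmp without a repidx produces "<id>-<pfs>", and a bare id is
 *	 used when no pmp is supplied (callers that pass hmp instead,
 *	 such as the bulkfree helper).
 */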
/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}
/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}
void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}
void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}
int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}
/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}
/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
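/*
 * Typical frontend life cycle (sketch only; the backend function name is
 * illustrative, see the VOP frontends for real callers):
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */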
void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}
size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}
void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}
/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}
void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}
/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->func = func;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}
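/*
 * NOTE: The group index (ng) above is derived from a crc of the inode
 *	 pointer (and of lbase for strategy ops), so operations on the
 *	 same inode always land on the same worker queue.  Strategy
 *	 operations map into the upper half of the XOP groups and
 *	 everything else into the lower half, keeping the two classes on
 *	 separate worker threads.
 */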
void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}
/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
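/*
 * NOTE: hammer2_xop_retire() removes the caller's bit(s) from run_mask
 *	 and bumps the FEED count in a single atomic op.  Only the last
 *	 entity out (whose mask equals the remaining ALLDONE bits)
 *	 performs the cluster, fifo, inode, and name cleanup and returns
 *	 the xop to the objcache.
 */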
/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}
/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];

	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}
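/*
 * NOTE: Feeder-side flow control.  A backend blocks in the loop above
 *	 when its fifo is full (wi has wrapped around to ri +
 *	 HAMMER2_XOPFIFO), marking the fifo with HAMMER2_XOP_FIFO_STALL.
 *	 The frontend clears the stall and wakes the feeder once the
 *	 collector drains the fifo to half full (see
 *	 hammer2_xop_collect() below).
 */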
/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				/* XXX */
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;	/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		 - key valid, cluster can be returned.
	 *
	 * ENOENT	 - normal end of scan, return ENOENT.
	 *
	 * ESRCH	 - sufficient elements collected, quorum agreement
	 *		   that lokey is not a valid element and should be
	 *		   skipped.
	 *
	 * EDEADLK	 - sufficient elements collected, no quorum agreement
	 *		   (and no agreement possible).  In this situation a
	 *		   repair is needed, for now we loop.
	 *
	 * EINPROGRESS	 - insufficient elements collected to resolve, wait
	 *		   for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}
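/*
 * A scan-style frontend typically loops on the collector until the
 * normal-termination code is returned (sketch only):
 *
 *	while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
 *		... consume xop->head.cluster ...
 *	}
 *
 * where error == HAMMER2_ERROR_ENOENT indicates the normal end of scan.
 */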
/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
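/*
 * NOTE: hash[] is a small per-scan bitmap, local to hammer2_xop_next(),
 *	 which remembers the inodes referenced by xops earlier in the
 *	 queue.  A later xop touching any of those inodes is skipped so
 *	 it cannot pass an earlier xop on the same inode.
 */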
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}
/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}
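/*
 * NOTE: If more xops remain queued after the dequeue we re-signal
 *	 HAMMER2_THREAD_XOPQ so the worker loop re-scans the queue;
 *	 entries previously skipped due to dependencies may now be
 *	 runnable.
 */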
/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS thread node operations, allowing the function to focus on a single
 * node in the cluster after validating the operation with the cluster.
 * This is primarily what prevents dead or stalled nodes from stalling
 * the front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

#if 0
	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
#endif

	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}