/*
 * Copyright (c) 2015-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"
/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}
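/*
 * Note on the pattern above: the WAITING bit is cleared by the same
 * cmpset that publishes the new flags, so the wakeup() is only issued
 * when a waiter had actually registered.  If the cmpset loses a race
 * we loop and re-evaluate against the updated flags, which keeps the
 * signal side safe without holding any lock across the wakeup.
 */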
/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}
/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
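/*
 * The tsleep_interlock()/PINTERLOCKED pairing above closes the window
 * between testing thr->flags and going to sleep: if the signal side's
 * cmpset beats ours, our cmpset fails and we retest instead of
 * sleeping.  The hz*60 tsleep is only a robustness backstop; under
 * normal operation the wakeup arrives via hammer2_thr_signal*().
 */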
/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}
/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}
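/*
 * Illustrative call (a sketch, mirroring hammer2_xop_helper_create()
 * later in this file): the XOP helper threads take the repidx >= 0
 * path, e.g.
 *
 *	hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp, NULL,
 *			   "h2xop", i, j, hammer2_primary_xops_thread);
 *
 * which names the thread "h2xop-<pfsname>.<j>" and distributes the
 * replicated threads across cpus via repidx % ncpus.
 */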
/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}
/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}
void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}
void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}
int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}
/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/
void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}
/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
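/*
 * Typical frontend ordering (a sketch; details vary by VOP, and
 * backend_func stands in for one of the per-VOP backend functions):
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, backend_func);
 *
 * The frontend then collects results via hammer2_xop_collect() (see
 * the loop sketch after that function) and always retires with
 * HAMMER2_XOPMASK_VOP.
 */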
void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}
void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}
size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}
void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}
void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}
/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}
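/*
 * Note that the double loop above creates HAMMER2_XOPGROUPS worker
 * threads per cluster node, each with its own xopq, so the total
 * helper count scales with the number of nodes in the iroot cluster.
 */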
void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}
/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->func = func;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}
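/*
 * Group selection above: both hash computations mask with
 * HAMMER2_XOPGROUPS_MASK >> 1, so non-strategy ops land in the lower
 * half of the group space while strategy ops, after the
 * HAMMER2_XOPGROUPS / 2 offset, land in the upper half.  Ops on the
 * same inode (and, for strategy ops, the same lbase) always hash to
 * the same group, which is what provides the per-inode ordering
 * described in the comment above.
 */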
void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}
/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
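/*
 * run_mask accounting as used here: each backend feeder owns one low
 * bit, the frontend owns HAMMER2_XOPMASK_VOP, and HAMMER2_XOPMASK_FEED
 * effectively acts as a generation count bumped on every feed/retire
 * so sleepers interlocked on run_mask notice activity.  The fetchadd
 * above removes our bit and bumps the count in one atomic op; only the
 * last entity out (nmask's ALLDONE bits equal to our own mask) runs
 * the destruction path below.
 */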
/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}
/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];
	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}
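/*
 * FIFO notes: each clindex has a producer index (wi) owned by the
 * backend feeder and a consumer index (ri) owned by the frontend,
 * indexing a power-of-2 ring of HAMMER2_XOPFIFO entries, so no lock is
 * needed in the common path.  The cpu_sfence() above publishes the
 * array/error entries before the new wi becomes visible, and a full
 * ring parks the feeder on the xop with FIFOW set until the collector
 * drains it below half full.
 */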
/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		 - key valid, cluster can be returned.
	 *
	 * ENOENT	 - normal end of scan, return ENOENT.
	 *
	 * ESRCH	 - sufficient elements collected, quorum agreement
	 *		   that lokey is not a valid element and should be
	 *		   skipped.
	 *
	 * EDEADLK	 - sufficient elements collected, no quorum agreement
	 *		   (and no agreement possible).  In this situation a
	 *		   repair is needed, for now we loop.
	 *
	 * EINPROGRESS	 - insufficient elements collected to resolve, wait
	 *		   for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}
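/*
 * A typical frontend scan loop (a sketch): repeatedly collect until
 * the scan terminates, then retire:
 *
 *	for (;;) {
 *		error = hammer2_xop_collect(&xop->head, 0);
 *		if (error)
 *			break;
 *		... consume xop->head.cluster ...
 *	}
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 *
 * where HAMMER2_ERROR_ENOENT indicates normal end-of-scan.
 */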
/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)
static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}
static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
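/*
 * These two helpers implement a small Bloom-style filter over the
 * inodes touched by xops seen earlier in the queue scan: XOP_HASH_SIZE
 * uint32_t words give a 512-bit bitmap.  A false positive merely
 * defers an independent xop until a later pass; it never allows two
 * dependent xops to run concurrently on this thread.
 */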
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}
/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}
/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads serialize node operations, allowing each backend function
 * to focus on a single node in the cluster after the operation has been
 * validated against the cluster.  This is primarily what prevents dead
 * or stalled nodes from stalling the front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}

	thr->pmp = NULL;
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}