/*
 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

/*
 * Signal that the thread has work.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if (oflags & HAMMER2_THREAD_WAITING) {
                        /*
                         * Post the flags and clear WAITING; the thread is
                         * asleep on &thr->flags and must be woken up.
                         */
                        if (atomic_cmpset_int(&thr->flags, oflags,
                                  (oflags | flags) & ~HAMMER2_THREAD_WAITING)) {
                                wakeup(&thr->flags);
                                break;
                        }
                } else {
                        /*
                         * Thread is not waiting, just post the flags.
                         */
                        if (atomic_cmpset_int(&thr->flags, oflags,
                                              oflags | flags)) {
                                break;
                        }
                }
        }
}

/*
 * Return status to waiting client(s).
 */
void
hammer2_thr_return(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                nflags = (oflags | flags) & ~HAMMER2_THREAD_CLIENTWAIT;

                if (oflags & HAMMER2_THREAD_CLIENTWAIT) {
                        /*
                         * Clear CLIENTWAIT and wake up any waiting client.
                         */
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                                wakeup(thr);
                                break;
                        }
                } else {
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags))
                                break;
                }
        }
}

/*
 * Wait until the bits in flags are set.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if ((oflags & flags) == flags)
                        break;
                nflags = oflags | HAMMER2_THREAD_CLIENTWAIT;
                tsleep_interlock(thr, 0);
                if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                        tsleep(thr, PINTERLOCKED, "h2twait", hz*60);
                }
        }
}

/*
 * Wait until the bits in flags are clear.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if ((oflags & flags) == 0)
                        break;
                nflags = oflags | HAMMER2_THREAD_CLIENTWAIT;
                tsleep_interlock(thr, 0);
                if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                        tsleep(thr, PINTERLOCKED, "h2twait", hz*60);
                }
        }
}
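
/*
 * Illustrative note (not part of the original flow): the signal/wait pairs
 * above avoid lost wakeups by combining tsleep_interlock() with an
 * atomic_cmpset_int() on thr->flags.  A sketch of the two sides, assuming
 * a waiter W and a signaller S:
 *
 *      W: tsleep_interlock(chan, 0)            (registers wait channel)
 *      W: atomic_cmpset_int(flags, o, o|WAIT)  (publishes intent to sleep)
 *      S: sets bits, sees WAIT, wakeup(chan)   (wakeup cannot be missed)
 *      W: tsleep(chan, PINTERLOCKED, ...)      (returns immediately if the
 *                                               wakeup already occurred)
 *
 * If the cmpset fails the waiter simply reloads thr->flags and retries.
 */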

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
                   const char *id, int clindex, int repidx,
                   void (*func)(void *arg))
{
        thr->pmp = pmp;
        thr->clindex = clindex;
        thr->repidx = repidx;
        TAILQ_INIT(&thr->xopq);

        if (repidx >= 0) {
                lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
                            "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
        } else {
                lwkt_create(func, thr, &thr->td, NULL, 0, -1,
                            "%s-%s", id, pmp->pfs_names[clindex]);
        }
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
        hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
        thr->pmp = NULL;
        KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

/*
 * Request a freeze but do not wait for the thread to acknowledge it.
 */
void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
        hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

/*
 * Freeze the thread and wait for it to enter the frozen state.
 */
void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
        hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

/*
 * Unfreeze the thread and wait for the frozen state to clear.
 */
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
        hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

/*
 * Returns non-zero if a stop, remaster, or freeze request is pending,
 * allowing a long-running operation to break out early.
 */
int
hammer2_thr_break(hammer2_thread_t *thr)
{
        if (thr->flags & (HAMMER2_THREAD_STOP |
                          HAMMER2_THREAD_REMASTER |
                          HAMMER2_THREAD_FREEZE)) {
                return 1;
        }
        return 0;
}
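
/*
 * Illustrative note: a helper or synchronization thread performing a long
 * scan would typically poll hammer2_thr_break() at convenient points, e.g.
 *
 *      while (more_work) {
 *              if (hammer2_thr_break(thr))
 *                      break;
 *              ... perform a bounded chunk of work ...
 *      }
 *
 * so that stop/remaster/freeze requests are honored promptly.
 */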

/****************************************************************************
 *                          HAMMER2 XOPS API                                *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
        /* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
        hammer2_xop_t *xop;

        xop = objcache_get(cache_xops, M_WAITOK);
        KKASSERT(xop->head.cluster.array[0].chain == NULL);

        xop->head.ip1 = ip;
        xop->head.func = NULL;
        xop->head.flags = flags;
        xop->head.state = 0;
        xop->head.error = 0;
        xop->head.collect_key = 0;
        xop->head.check_counter = 0;
        if (flags & HAMMER2_XOP_MODIFYING)
                xop->head.mtid = hammer2_trans_sub(ip->pmp);
        else
                xop->head.mtid = 0;

        xop->head.cluster.nchains = ip->cluster.nchains;
        xop->head.cluster.pmp = ip->pmp;
        xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

        /*
         * run_mask - Active thread (or frontend) associated with XOP
         */
        xop->head.run_mask = HAMMER2_XOPMASK_VOP;

        hammer2_inode_ref(ip);

        return xop;
}
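
/*
 * Illustrative lifecycle sketch (hedged; the exact call sites live in the
 * VOP frontend code, and hammer2_xop_nresolve is just one example backend):
 *
 *      hammer2_xop_nresolve_t *xop;
 *
 *      xop = hammer2_xop_alloc(dip, 0);
 *      hammer2_xop_setname(&xop->head, name, name_len);
 *      hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      ... use xop->head.cluster ...
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */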

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
        xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name1_len = name_len;
        bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
        xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name2_len = name_len;
        bcopy(name, xop->name2, name_len);
}

void
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
        const size_t name_len = 18;     /* "0x" plus 16 hex digits */

        xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name1_len = name_len;
        ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);
}

void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
        xop->ip2 = ip2;
        hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
        xop->ip3 = ip3;
        hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
        xop->state = 0;
        xop->error = 0;
        xop->collect_key = 0;
        xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
        int i;
        int j;

        lockmgr(&pmp->lock, LK_EXCLUSIVE);
        pmp->has_xop_threads = 1;

        for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
                for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
                        if (pmp->xop_groups[j].thrs[i].td)
                                continue;
                        hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp,
                                           "h2xop", i, j,
                                           hammer2_primary_xops_thread);
                }
        }
        lockmgr(&pmp->lock, LK_RELEASE);
}
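
/*
 * Sizing note (illustrative): helper threads scale as
 * nchains x HAMMER2_XOPGROUPS, so e.g. a 3-node cluster creates
 * 3 * HAMMER2_XOPGROUPS worker threads, each with its own xopq.
 */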

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
        int i;
        int j;

        for (i = 0; i < pmp->pfs_nmasters; ++i) {
                for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
                        if (pmp->xop_groups[j].thrs[i].td)
                                hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
                }
        }
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
                         int notidx)
{
        hammer2_inode_t *ip1;
        hammer2_pfs_t *pmp;
        hammer2_thread_t *thr;
        int i;
        int ng;
        int nchains;

        ip1 = xop->ip1;
        pmp = ip1->pmp;
        if (pmp->has_xop_threads == 0)
                hammer2_xop_helper_create(pmp);

        /*
         * The intent of the XOP sequencer is to ensure that ops on the
         * same inode execute in the same order.  This is necessary when
         * issuing modifying operations to multiple targets because some
         * targets might get behind and the frontend is allowed to complete
         * the moment a quorum of targets succeed.
         *
         * Strategy operations must be segregated from non-strategy
         * operations to avoid a deadlock.  For example, if a vfsync and a
         * bread/bwrite were queued to the same worker thread, the locked
         * buffer in the strategy operation can deadlock the vfsync's
         * buffer list scan.
         *
         * TODO - RENAME fails here because it is potentially modifying
         *        three different inodes.
         */
        if (xop->flags & HAMMER2_XOP_STRATEGY) {
                hammer2_xop_strategy_t *xopst;

                xopst = &((hammer2_xop_t *)xop)->xop_strategy;
                ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
                           hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
                ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
                ng += HAMMER2_XOPGROUPS / 2;
        } else {
                ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
                ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
        }
        xop->func = func;

        /*
         * The instant xop is queued another thread can pick it off.  In the
         * case of asynchronous ops, another thread might even finish and
         * deallocate it.
         */
        hammer2_spin_ex(&pmp->xop_spin);
        nchains = ip1->cluster.nchains;
        for (i = 0; i < nchains; ++i) {
                /*
                 * XXX ip1->cluster.array* not stable here.  This temporary
                 *     hack fixes basic issues in target XOPs which need to
                 *     obtain a starting chain from the inode but does not
                 *     address possible races against inode updates which
                 *     might NULL-out a chain.
                 */
                if (i != notidx && ip1->cluster.array[i].chain) {
                        thr = &pmp->xop_groups[ng].thrs[i];
                        atomic_set_int(&xop->run_mask, 1U << i);
                        atomic_set_int(&xop->chk_mask, 1U << i);
                        TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
                }
        }
        hammer2_spin_unex(&pmp->xop_spin);
        /* xop can become invalid at this point */

        /*
         * Each thread has its own xopq
         */
        for (i = 0; i < nchains; ++i) {
                if (i != notidx) {
                        thr = &pmp->xop_groups[ng].thrs[i];
                        hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
                }
        }
}
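
/*
 * Group-selection sketch (illustrative): the icrc32 hashing above maps
 * strategy ops into the upper half of the group space and everything else
 * into the lower half.  If HAMMER2_XOPGROUPS were 16, for example, a
 * non-strategy op would land in groups 0-7 and a strategy op in groups
 * 8-15, so the two classes can never share a worker thread's xopq.
 */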

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
        hammer2_xop_start_except(xop, func, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
        hammer2_chain_t *chain;
        hammer2_inode_t *ip;
        uint32_t nmask;
        int i;

        /*
         * Remove the frontend collector or remove a backend feeder.
         * When removing the frontend we must wakeup any backend feeders
         * who are waiting for FIFO space.
         *
         * XXX optimize wakeup.
         */
        KKASSERT(xop->run_mask & mask);
        nmask = atomic_fetchadd_int(&xop->run_mask, -mask);
        if ((nmask & ~HAMMER2_XOPMASK_FIFOW) != mask) {
                if (mask == HAMMER2_XOPMASK_VOP) {
                        if (nmask & HAMMER2_XOPMASK_FIFOW)
                                wakeup(xop);
                }
                return;
        }
        /* else nobody else left, we can ignore FIFOW */

        /*
         * All collectors are gone, we can cleanup and dispose of the XOP.
         * Note that this can wind up being a frontend OR a backend.
         * Pending chains are locked shared and not owned by any thread.
         *
         * Cache the terminating cluster.
         */
        if ((ip = xop->ip1) != NULL) {
                hammer2_cluster_t *tmpclu;

                tmpclu = hammer2_cluster_copy(&xop->cluster);
                hammer2_spin_ex(&ip->cluster_spin);
                tmpclu = atomic_swap_ptr((volatile void **)&ip->cluster_cache,
                                         tmpclu);
                hammer2_spin_unex(&ip->cluster_spin);
                if (tmpclu)
                        hammer2_cluster_drop(tmpclu);
        }

        /*
         * Cleanup the collection cluster.
         */
        for (i = 0; i < xop->cluster.nchains; ++i) {
                xop->cluster.array[i].flags = 0;
                chain = xop->cluster.array[i].chain;
                if (chain) {
                        xop->cluster.array[i].chain = NULL;
                        hammer2_chain_drop_unhold(chain);
                }
        }

        /*
         * Cleanup the fifos, use chk_mask to optimize the loop.
         * Since we are the only entity left on this xop we don't have
         * to worry about fifo flow control, and one lfence() will do the
         * job.
         */
        cpu_lfence();
        mask = xop->chk_mask;
        for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
                hammer2_xop_fifo_t *fifo = &xop->collect[i];
                while (fifo->ri != fifo->wi) {
                        chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        if (chain)
                                hammer2_chain_drop_unhold(chain);
                        ++fifo->ri;
                }
                mask &= ~(1U << i);
        }

        /*
         * The inode is only held at this point, simply drop it.
         */
        if (xop->ip1) {
                hammer2_inode_drop(xop->ip1);
                xop->ip1 = NULL;
        }
        if (xop->ip2) {
                hammer2_inode_drop(xop->ip2);
                xop->ip2 = NULL;
        }
        if (xop->ip3) {
                hammer2_inode_drop(xop->ip3);
                xop->ip3 = NULL;
        }
        if (xop->name1) {
                kfree(xop->name1, M_HAMMER2);
                xop->name1 = NULL;
                xop->name1_len = 0;
        }
        if (xop->name2) {
                kfree(xop->name2, M_HAMMER2);
                xop->name2 = NULL;
                xop->name2_len = 0;
        }

        objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
        if (xop->run_mask & HAMMER2_XOPMASK_VOP)
                return 1;
        else
                return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
                 int clindex, int error)
{
        hammer2_xop_fifo_t *fifo;
        uint32_t mask;

        /*
         * Early termination (typically of xop_readdir)
         */
        if (hammer2_xop_active(xop) == 0) {
                error = EINTR;
                goto done;
        }

        /*
         * Multi-threaded entry into the XOP collector.  We own the
         * fifo->wi for our clindex.
         */
        fifo = &xop->collect[clindex];

        if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
                lwkt_yield();
        while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
                atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
                mask = xop->run_mask;
                if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
                        error = EINTR;
                        goto done;
                }
                tsleep_interlock(xop, 0);
                if (atomic_cmpset_int(&xop->run_mask, mask,
                                      mask | HAMMER2_XOPMASK_FIFOW)) {
                        if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
                                tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
                        }
                }
                /* retry */
        }
        atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
        if (chain)
                hammer2_chain_ref_hold(chain);
        if (error == 0 && chain)
                error = chain->error;
        fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
        fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
        cpu_sfence();
        ++fifo->wi;
        if (atomic_fetchadd_int(&xop->check_counter, HAMMER2_XOP_CHKINC) &
            HAMMER2_XOP_CHKWAIT) {
                atomic_clear_int(&xop->check_counter, HAMMER2_XOP_CHKWAIT);
                wakeup(&xop->check_counter);
        }
        error = 0;

        /*
         * Cleanup.  If an error occurred we eat the lock.  If no error
         * occurred the fifo inherits the lock and gains an additional ref.
         *
         * The caller's ref remains in both cases.
         */
done:
        return error;
}
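
/*
 * Illustrative backend pattern (hedged): a node-level XOP typically loops
 * feeding chains until done, then feeds a terminating error, e.g.
 *
 *      for (;;) {
 *              chain = ... next chain for this node ...;
 *              if (chain == NULL)
 *                      break;
 *              error = hammer2_xop_feed(&xop->head, chain, clindex, 0);
 *              if (error)
 *                      break;
 *      }
 *      hammer2_xop_feed(&xop->head, NULL, clindex, error ? error : ENOENT);
 *
 * The NULL/ENOENT feed is what the frontend collector interprets as a
 * normal end of scan for that node.
 */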

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT   - Used to 'poll' a completed collection,
 *                                usually called synchronously from the
 *                                node XOPs for the strategy code to
 *                                fake the frontend collection and complete
 *                                the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER     - Request synchronization with a particular
 *                                cluster index, prevents looping when that
 *                                index is out of sync so caller can act on
 *                                the out of sync element.  ESRCH and EDEADLK
 *                                can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
        hammer2_xop_fifo_t *fifo;
        hammer2_chain_t *chain;
        hammer2_key_t lokey;
        int error;
        int keynull;
        int adv;                /* advance the element */
        int i;
        uint32_t check_counter;

loop:
        /*
         * First loop tries to advance pieces of the cluster which
         * are out of sync.
         */
        lokey = HAMMER2_KEY_MAX;
        keynull = HAMMER2_CHECK_NULL;
        check_counter = xop->check_counter;
        cpu_lfence();

        for (i = 0; i < xop->cluster.nchains; ++i) {
                chain = xop->cluster.array[i].chain;
                if (chain == NULL) {
                        adv = 1;
                } else if (chain->bref.key < xop->collect_key) {
                        adv = 1;
                } else {
                        keynull &= ~HAMMER2_CHECK_NULL;
                        if (lokey > chain->bref.key)
                                lokey = chain->bref.key;
                        adv = 0;
                }
                if (adv == 0)
                        continue;

                /*
                 * Advance element if possible, advanced element may be NULL.
                 */
                if (chain)
                        hammer2_chain_drop_unhold(chain);

                fifo = &xop->collect[i];
                if (fifo->ri != fifo->wi) {
                        cpu_lfence();
                        chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        ++fifo->ri;
                        xop->cluster.array[i].chain = chain;
                        xop->cluster.array[i].error = error;
                        if (chain == NULL) {
                                xop->cluster.array[i].flags |=
                                                        HAMMER2_CITEM_NULL;
                        }
                        if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
                                if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
                                        atomic_clear_int(&fifo->flags,
                                                    HAMMER2_XOP_FIFO_STALL);
                                        wakeup(xop);
                                        lwkt_yield();
                                }
                        }
                        --i;            /* loop on same index */
                } else {
                        /*
                         * Retain CITEM_NULL flag.  If set just repeat EOF.
                         * If not, the NULL,0 combination indicates an
                         * operation in-progress.
                         */
                        xop->cluster.array[i].chain = NULL;
                        /* retain any CITEM_NULL setting */
                }
        }

        /*
         * Determine whether the lowest collected key meets clustering
         * requirements.  Returns:
         *
         * 0            - key valid, cluster can be returned.
         *
         * ENOENT       - normal end of scan, return ENOENT.
         *
         * ESRCH        - sufficient elements collected, quorum agreement
         *                that lokey is not a valid element and should be
         *                skipped.
         *
         * EDEADLK      - sufficient elements collected, no quorum agreement
         *                (and no agreement possible).  In this situation a
         *                repair is needed, for now we loop.
         *
         * EINPROGRESS  - insufficient elements collected to resolve, wait
         *                for event and loop.
         */
        if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
            xop->run_mask != HAMMER2_XOPMASK_VOP) {
                error = EINPROGRESS;
        } else {
                error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
        }
        if (error == EINPROGRESS) {
                if ((flags & HAMMER2_XOP_COLLECT_NOWAIT) == 0)
                        tsleep_interlock(&xop->check_counter, 0);
                if (atomic_cmpset_int(&xop->check_counter,
                                      check_counter,
                                      check_counter | HAMMER2_XOP_CHKWAIT)) {
                        if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
                                goto done;
                        tsleep(&xop->check_counter, PINTERLOCKED,
                               "h2coll", hz*60);
                        /* retry */
                }
                goto loop;
        }
        if (error == ESRCH) {
                if (lokey != HAMMER2_KEY_MAX) {
                        xop->collect_key = lokey + 1;
                        goto loop;
                }
                error = ENOENT;
        }
        if (error == EDEADLK) {
                kprintf("hammer2: no quorum possible lokey %016jx\n",
                        (intmax_t)lokey);
                if (lokey != HAMMER2_KEY_MAX) {
                        xop->collect_key = lokey + 1;
                        goto loop;
                }
                error = ENOENT;
        }
        if (lokey == HAMMER2_KEY_MAX)
                xop->collect_key = lokey;
        else
                xop->collect_key = lokey + 1;
done:
        return error;
}
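
/*
 * Illustrative frontend pattern (hedged): a readdir-style VOP collects
 * until ENOENT terminates the scan, e.g.
 *
 *      for (;;) {
 *              error = hammer2_xop_collect(&xop->head, 0);
 *              if (error)
 *                      break;
 *              ... consume the focus of xop->head.cluster ...
 *      }
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 *      if (error == ENOENT)
 *              error = 0;      (normal termination)
 */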

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.  All the threads for any given cluster index
 * share and pull from the same xopq.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various
 * reasons).  The xop is left on the queue and serves to block other
 * dependent xops from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE   16
#define XOP_HASH_MASK   (XOP_HASH_SIZE - 1)

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
        uint32_t mask;
        int hv;

        hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
        mask = 1U << (hv & 31);
        hv >>= 5;

        return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
        uint32_t mask;
        int hv;

        hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
        mask = 1U << (hv & 31);
        hv >>= 5;

        hash[hv & XOP_HASH_MASK] |= mask;
}
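
/*
 * Dependency-filter note (illustrative): the hash[] array is a small
 * XOP_HASH_SIZE x 32 = 512-bit occupancy bitmap built fresh for each
 * queue scan.  Inodes claimed by an earlier xop in the scan set a bit;
 * a later xop touching a colliding bit is treated as dependent and
 * skipped.  Collisions can only create false dependencies, which delay
 * an xop but never allow two conflicting xops to run concurrently.
 */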

static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
        hammer2_pfs_t *pmp = thr->pmp;
        int clindex = thr->clindex;
        uint32_t hash[XOP_HASH_SIZE] = { 0 };
        hammer2_xop_head_t *xop;

        hammer2_spin_ex(&pmp->xop_spin);
        TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
                /*
                 * Check dependency
                 */
                if (xop_testhash(thr, xop->ip1, hash) ||
                    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
                    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
                        continue;
                }
                xop_sethash(thr, xop->ip1, hash);
                if (xop->ip2)
                        xop_sethash(thr, xop->ip2, hash);
                if (xop->ip3)
                        xop_sethash(thr, xop->ip3, hash);

                /*
                 * Check already running
                 */
                if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
                        continue;

                /*
                 * Found a good one, return it.
                 */
                atomic_set_int(&xop->collect[clindex].flags,
                               HAMMER2_XOP_FIFO_RUN);
                break;
        }
        hammer2_spin_unex(&pmp->xop_spin);

        return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
        hammer2_pfs_t *pmp = thr->pmp;
        int clindex = thr->clindex;

        hammer2_spin_ex(&pmp->xop_spin);
        TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
        atomic_clear_int(&xop->collect[clindex].flags,
                         HAMMER2_XOP_FIFO_RUN);
        hammer2_spin_unex(&pmp->xop_spin);
        if (TAILQ_FIRST(&thr->xopq))
                hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads run node-level operations, allowing each backend function
 * to focus on a single node in the cluster after the operation has been
 * validated against the cluster.  This is primarily what prevents dead
 * or stalled nodes from stalling the frontend.
 */
void
hammer2_primary_xops_thread(void *arg)
{
        hammer2_thread_t *thr = arg;
        hammer2_pfs_t *pmp;
        hammer2_xop_head_t *xop;
        uint32_t mask;
        uint32_t flags;
        uint32_t nflags;
        hammer2_xop_func_t last_func = NULL;

        pmp = thr->pmp;
        /*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
        mask = 1U << thr->clindex;

        for (;;) {
                flags = thr->flags;

                /*
                 * Handle stop request
                 */
                if (flags & HAMMER2_THREAD_STOP)
                        break;

                /*
                 * Handle freeze request
                 */
                if (flags & HAMMER2_THREAD_FREEZE) {
                        nflags = (flags & ~(HAMMER2_THREAD_FREEZE |
                                            HAMMER2_THREAD_CLIENTWAIT)) |
                                 HAMMER2_THREAD_FROZEN;
                        if (!atomic_cmpset_int(&thr->flags, flags, nflags))
                                continue;
                        if (flags & HAMMER2_THREAD_CLIENTWAIT)
                                wakeup(&thr->flags);
                        continue;
                }

                if (flags & HAMMER2_THREAD_UNFREEZE) {
                        nflags = flags & ~(HAMMER2_THREAD_UNFREEZE |
                                           HAMMER2_THREAD_FROZEN |
                                           HAMMER2_THREAD_CLIENTWAIT);
                        if (!atomic_cmpset_int(&thr->flags, flags, nflags))
                                continue;
                        if (flags & HAMMER2_THREAD_CLIENTWAIT)
                                wakeup(&thr->flags);
                        continue;
                }

                /*
                 * Force idle if frozen until unfrozen or stopped.
                 */
                if (flags & HAMMER2_THREAD_FROZEN) {
                        nflags = flags | HAMMER2_THREAD_WAITING;
                        tsleep_interlock(&thr->flags, 0);
                        if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
                                tsleep(&thr->flags, PINTERLOCKED, "frozen", 0);
                                atomic_clear_int(&thr->flags,
                                                 HAMMER2_THREAD_WAITING);
                        }
                        continue;
                }

                /*
                 * Reset state on REMASTER request
                 */
                if (flags & HAMMER2_THREAD_REMASTER) {
                        nflags = flags & ~HAMMER2_THREAD_REMASTER;
                        if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
                                /* reset state here */
                        }
                        continue;
                }

                /*
                 * Process requests.  Each request can be multi-queued.
                 *
                 * If we get behind and the frontend VOP is no longer active,
                 * we retire the request without processing it.  The callback
                 * may also abort processing if the frontend VOP becomes
                 * inactive.
                 */
                if (flags & HAMMER2_THREAD_XOPQ) {
                        nflags = flags & ~HAMMER2_THREAD_XOPQ;
                        if (!atomic_cmpset_int(&thr->flags, flags, nflags))
                                continue;
                        flags = nflags;
                        /* fall through */
                }
                while ((xop = hammer2_xop_next(thr)) != NULL) {
                        if (hammer2_xop_active(xop)) {
                                last_func = xop->func;
                                xop->func((hammer2_xop_t *)xop,
                                          thr->clindex);
                                hammer2_xop_dequeue(thr, xop);
                                hammer2_xop_retire(xop, mask);
                        } else {
                                last_func = xop->func;
                                hammer2_xop_feed(xop, NULL, thr->clindex,
                                                 ECONNABORTED);
                                hammer2_xop_dequeue(thr, xop);
                                hammer2_xop_retire(xop, mask);
                        }
                }

                /*
                 * Wait for event, interlock using THREAD_WAITING and
                 * THREAD_SIGNAL.
                 *
                 * For robustness poll on a 30-second interval, but nominally
                 * expect to be woken up.
                 */
                nflags = flags | HAMMER2_THREAD_WAITING;

                tsleep_interlock(&thr->flags, 0);
                if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
                        tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
                        atomic_clear_int(&thr->flags, HAMMER2_THREAD_WAITING);
                }
        }

        /*
         * Cleanup / termination
         */
        while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
                kprintf("hammer2_thread: aborting xop %p\n", xop->func);
                TAILQ_REMOVE(&thr->xopq, xop,
                             collect[thr->clindex].entry);
                hammer2_xop_retire(xop, mask);
        }

        thr->td = NULL;
        hammer2_thr_return(thr, HAMMER2_THREAD_STOPPED);
        /* thr structure can go invalid after this point */
}