/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */
/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0,
    "");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif
#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO can be written.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
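
/*
 * Usage sketch (illustrative, not from the original source): callers
 * normally go through the one-argument lwkt_send_ipiq() wrapper;
 * remote_wakeup_ipi below is a hypothetical handler.  The returned
 * sequence number can be handed to lwkt_wait_ipiq() to rendezvous with
 * the target cpu's consumption of the message:
 *
 *	static void remote_wakeup_ipi(void *ident)
 *	{
 *		wakeup(ident);
 *	}
 *
 *	seq = lwkt_send_ipiq(target_gd, remote_wakeup_ipi, ident);
 *	lwkt_wait_ipiq(target_gd, seq);
 */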
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
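
/*
 * Note (summary, not in the original source): the passive variant trades
 * latency for fewer hardware interrupts -- the message simply sits in the
 * FIFO until the target cpu polls its ipiqs, typically from the next clock
 * interrupt, which is why it is only suitable for non-critical work such
 * as deferred memory deallocation.
 */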
/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
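
/*
 * Usage sketch (illustrative, not from the original source): unlike the
 * blocking variants, callers of the nowait version must check the return
 * code and arrange a fallback when the FIFO is too full:
 *
 *	if (lwkt_send_ipiq3_nowait(target_gd, func, arg1, arg2) == ENOENT) {
 *		... defer the operation or handle it locally ...
 *	}
 */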
/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~CPUMASK(cpuid);
	++count;
    }
    return(count);
}
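
/*
 * Note (summary, not in the original source): the loop above is the
 * standard cpumask iteration idiom -- BSFCPUMASK() returns the lowest
 * set bit, which is then cleared from the local copy of the mask, so
 * each selected cpu receives exactly one IPI and the function returns
 * the number of IPIs sent.
 */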
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0)
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#if 0
static int iqterm[SMP_MAXCPU];
#endif
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;
    int i;

    /*
     * Watchdog: complain if this cpu appears to be stuck processing
     * its ipiq.
     */
    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks_wr);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
		    (frame ? (void *)frame->if_eip : NULL));
#elif defined(__amd64__)
		    (frame ? (void *)frame->if_rip : NULL));
#endif
	}
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}
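
/*
 * Note (summary, not in the original source): atomic_poll_release_int()
 * above is the consumer half of the ip_npoll interlock.  Senders only
 * raise a hardware IPI when atomic_poll_acquire_int() succeeds, so holding
 * ip_npoll while draining suppresses redundant IPIs; releasing it before
 * the final windex check ensures that a message queued in the race window
 * is either seen here (non-zero return) or triggers a fresh IPI.
 */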
static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}
void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
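
/*
 * Usage sketch (illustrative, not from the original source; the wmesg
 * string is arbitrary): a typical use is to drain in-flight IPIs before
 * tearing down a structure that remote callbacks may still reference:
 *
 *	lwkt_synchronize_ipiqs("ipiqsy");
 *	... all ipiqs sent before this point have now been processed ...
 */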
/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}
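
/*
 * Usage sketch (illustrative, not from the original source):
 * lwkt_cpusync_simple() above packages the canonical two-stage pattern.
 * Open-coded, a caller that needs the other cpus quiesced while it works
 * looks like this:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, func, data);
 *	lwkt_cpusync_interlock(&cs);
 *	... cpus in mask are now spinning in lwkt_cpusync_remote2() ...
 *	lwkt_cpusync_deinterlock(&cs);	(runs func on the cpus, then returns)
 */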
void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
    cs->cs_mack = 0;
    crit_enter_id("cpusync");
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, mask);
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
    }
}
/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cs->cs_mack = 0;
    if (mask) {
	if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	    cs->cs_func(cs->cs_data);

	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, mask);

	crit_exit_id("cpusync");
    } else {
	if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	    cs->cs_func(cs->cs_data);
	crit_exit_id("cpusync");
    }
}
/*
 * helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our acknowledge bit in cs_mack, then chain into
 * lwkt_cpusync_remote2() to poll for the deinterlock (requeueing the
 * request so we spin on it).
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    lwkt_cpusync_remote2(cs);
}
/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if ((cs->cs_mack & gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
	/* cs can be ripped out at this point */
    } else {
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = cs;
	ip->ip_arg2[wi] = 0;
	cpu_sfence();
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
		    cs->cs_func);
	}
    }
}