kernel - usched_dfly revamp (6), reimplement shared spinlocks & misc others
[dragonfly.git] / sys / kern / lwkt_ipiq.c
blob a7c31bc054b8570b66cc050d810eab227b620ad7
1 /*
2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
36 * This module implements IPI message queueing and the MI portion of IPI
37 * message processing.
40 #include "opt_ddb.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/rtprio.h>
47 #include <sys/queue.h>
48 #include <sys/thread2.h>
49 #include <sys/sysctl.h>
50 #include <sys/ktr.h>
51 #include <sys/kthread.h>
52 #include <machine/cpu.h>
53 #include <sys/lock.h>
54 #include <sys/caps.h>
56 #include <vm/vm.h>
57 #include <vm/vm_param.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_object.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vm_extern.h>
64 #include <vm/vm_zone.h>
66 #include <machine/stdarg.h>
67 #include <machine/smp.h>
68 #include <machine/atomic.h>
70 #ifdef SMP
71 static __int64_t ipiq_count; /* total calls to lwkt_send_ipiq*() */
72 static __int64_t ipiq_fifofull; /* number of fifo full conditions detected */
73 static __int64_t ipiq_avoided; /* interlock with target avoids cpu ipi */
74 static __int64_t ipiq_passive; /* passive IPI messages */
75 static __int64_t ipiq_cscount; /* number of cpu synchronizations */
76 static int ipiq_debug; /* set to 1 for debug */
77 #ifdef PANIC_DEBUG
78 static int panic_ipiq_cpu = -1;
79 static int panic_ipiq_count = 100;
80 #endif
81 #endif
83 #ifdef SMP
84 SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
85 "Number of IPI's sent");
86 SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
87 "Number of fifo full conditions detected");
88 SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
89 "Number of IPI's avoided by interlock with target cpu");
90 SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
91 "Number of passive IPI messages sent");
92 SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
93 "Number of cpu synchronizations");
94 SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
95 "");
96 #ifdef PANIC_DEBUG
97 SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
98 SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
99 #endif
101 #define IPIQ_STRING "func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
102 #define IPIQ_ARGS void *func, void *arg1, int arg2, int scpu, int dcpu
104 #if !defined(KTR_IPIQ)
105 #define KTR_IPIQ KTR_ALL
106 #endif
107 KTR_INFO_MASTER(ipiq);
108 KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
109 KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
110 KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARGS);
111 KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARGS);
112 KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
113 KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
114 KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
115 KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
116 KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
118 #define logipiq(name, func, arg1, arg2, sgd, dgd) \
119 KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
120 #define logipiq2(name, arg) \
121 KTR_LOG(ipiq_ ## name, arg)
123 #endif /* SMP */
125 #ifdef SMP
127 static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
128 struct intrframe *frame);
129 static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
130 static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);
133 * Send a function execution request to another cpu. The request is queued
134 * on the cpu<->cpu ipiq matrix. Each cpu owns a unique ipiq FIFO for every
 135  * possible target cpu.  The FIFO is written only by the owning (source) cpu.
137 * If the FIFO fills up we have to enable interrupts to avoid an APIC
138 * deadlock and process pending IPIQs while waiting for it to empty.
 139  * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
141 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
142 * end will take care of any pending interrupts.
144 * The actual hardware IPI is avoided if the target cpu is already processing
145 * the queue from a prior IPI. It is possible to pipeline IPI messages
146 * very quickly between cpus due to the FIFO hysteresis.
148 * Need not be called from a critical section.
151 lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
153 lwkt_ipiq_t ip;
154 int windex;
155 struct globaldata *gd = mycpu;
157 logipiq(send_norm, func, arg1, arg2, gd, target);
159 if (target == gd) {
160 func(arg1, arg2, NULL);
161 logipiq(send_end, func, arg1, arg2, gd, target);
162 return(0);
164 crit_enter();
165 ++gd->gd_intr_nesting_level;
166 #ifdef INVARIANTS
167 if (gd->gd_intr_nesting_level > 20)
168 panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
169 #endif
170 KKASSERT(curthread->td_critcount);
171 ++ipiq_count;
172 ip = &gd->gd_ipiq[target->gd_cpuid];
175 * Do not allow the FIFO to become full. Interrupts must be physically
176 * enabled while we liveloop to avoid deadlocking the APIC.
178 * The target ipiq may have gotten filled up due to passive IPIs and thus
179 * not be aware that its queue is too full, so be sure to issue an
180 * ipiq interrupt to the target cpu.
182 if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
183 #if defined(__i386__)
184 unsigned int eflags = read_eflags();
185 #elif defined(__x86_64__)
186 unsigned long rflags = read_rflags();
187 #endif
189 cpu_enable_intr();
190 ++ipiq_fifofull;
191 DEBUG_PUSH_INFO("send_ipiq3");
192 while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
193 if (atomic_poll_acquire_int(&target->gd_npoll)) {
194 logipiq(cpu_send, func, arg1, arg2, gd, target);
195 cpu_send_ipiq(target->gd_cpuid);
197 KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
198 lwkt_process_ipiq();
199 cpu_pause();
201 DEBUG_POP_INFO();
202 #if defined(__i386__)
203 write_eflags(eflags);
204 #elif defined(__x86_64__)
205 write_rflags(rflags);
206 #endif
210 * Queue the new message
212 windex = ip->ip_windex & MAXCPUFIFO_MASK;
213 ip->ip_info[windex].func = func;
214 ip->ip_info[windex].arg1 = arg1;
215 ip->ip_info[windex].arg2 = arg2;
216 cpu_sfence();
217 ++ip->ip_windex;
218 atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);
221 * signal the target cpu that there is work pending.
223 if (atomic_poll_acquire_int(&target->gd_npoll)) {
224 logipiq(cpu_send, func, arg1, arg2, gd, target);
225 cpu_send_ipiq(target->gd_cpuid);
226 } else {
227 ++ipiq_avoided;
229 --gd->gd_intr_nesting_level;
230 crit_exit();
231 logipiq(send_end, func, arg1, arg2, gd, target);
233 return(ip->ip_windex);
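/*
 * Hypothetical usage sketch: queue a remote function call with
 * lwkt_send_ipiq3().  The callback and wrapper names below are illustrative
 * only; the callback runs on the target cpu when it drains its ipiq FIFO.
 */
#if 0	/* illustrative only */
static void
example_ipi_func(void *arg1, int arg2, struct intrframe *frame)
{
	/* executes on the target cpu, inside a critical section */
}

static void
example_notify(globaldata_t target)
{
	/* the hardware IPI is skipped if the target is already draining */
	lwkt_send_ipiq3(target, example_ipi_func, NULL, 0);
}
#endif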
237 * Similar to lwkt_send_ipiq() but this function does not actually initiate
238 * the IPI to the target cpu unless the FIFO has become too full, so it is
239 * very fast.
241 * This function is used for non-critical IPI messages, such as memory
242 * deallocations. The queue will typically be flushed by the target cpu at
243 * the next clock interrupt.
245 * Need not be called from a critical section.
248 lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
249 void *arg1, int arg2)
251 lwkt_ipiq_t ip;
252 int windex;
253 struct globaldata *gd = mycpu;
255 KKASSERT(target != gd);
256 crit_enter();
257 ++gd->gd_intr_nesting_level;
258 logipiq(send_pasv, func, arg1, arg2, gd, target);
259 #ifdef INVARIANTS
260 if (gd->gd_intr_nesting_level > 20)
261 panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
262 #endif
263 KKASSERT(curthread->td_critcount);
264 ++ipiq_count;
265 ++ipiq_passive;
266 ip = &gd->gd_ipiq[target->gd_cpuid];
269 * Do not allow the FIFO to become full. Interrupts must be physically
270 * enabled while we liveloop to avoid deadlocking the APIC.
272 if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
273 #if defined(__i386__)
274 unsigned int eflags = read_eflags();
275 #elif defined(__x86_64__)
276 unsigned long rflags = read_rflags();
277 #endif
279 cpu_enable_intr();
280 ++ipiq_fifofull;
281 DEBUG_PUSH_INFO("send_ipiq3_passive");
282 while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
283 if (atomic_poll_acquire_int(&target->gd_npoll)) {
284 logipiq(cpu_send, func, arg1, arg2, gd, target);
285 cpu_send_ipiq(target->gd_cpuid);
287 KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
288 lwkt_process_ipiq();
289 cpu_pause();
291 DEBUG_POP_INFO();
292 #if defined(__i386__)
293 write_eflags(eflags);
294 #elif defined(__x86_64__)
295 write_rflags(rflags);
296 #endif
300 * Queue the new message
302 windex = ip->ip_windex & MAXCPUFIFO_MASK;
303 ip->ip_info[windex].func = func;
304 ip->ip_info[windex].arg1 = arg1;
305 ip->ip_info[windex].arg2 = arg2;
306 cpu_sfence();
307 ++ip->ip_windex;
308 atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);
309 --gd->gd_intr_nesting_level;
312 * Do not signal the target cpu, it will pick up the IPI when it next
313 * polls (typically on the next tick).
315 crit_exit();
316 logipiq(send_end, func, arg1, arg2, gd, target);
318 return(ip->ip_windex);
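/*
 * Hypothetical usage sketch of the passive variant: suited to deferred,
 * non-critical work such as freeing memory owned by another cpu; the target
 * picks it up on its next poll (typically the next tick).  The names below
 * are illustrative and M_TEMP is an assumed malloc type.
 */
#if 0	/* illustrative only */
static void
example_remote_free(void *arg1, int arg2, struct intrframe *frame)
{
	kfree(arg1, M_TEMP);		/* runs on the cpu that owns the memory */
}

static void
example_defer_free(globaldata_t target, void *ptr)
{
	lwkt_send_ipiq3_passive(target, example_remote_free, ptr, 0);
}
#endif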
322 * Send an IPI request without blocking, return 0 on success, ENOENT on
323 * failure. The actual queueing of the hardware IPI may still force us
324 * to spin and process incoming IPIs but that will eventually go away
325 * when we've gotten rid of the other general IPIs.
328 lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
329 void *arg1, int arg2)
331 lwkt_ipiq_t ip;
332 int windex;
333 struct globaldata *gd = mycpu;
335 logipiq(send_nbio, func, arg1, arg2, gd, target);
336 KKASSERT(curthread->td_critcount);
337 if (target == gd) {
338 func(arg1, arg2, NULL);
339 logipiq(send_end, func, arg1, arg2, gd, target);
340 return(0);
342 crit_enter();
343 ++gd->gd_intr_nesting_level;
344 ++ipiq_count;
345 ip = &gd->gd_ipiq[target->gd_cpuid];
347 if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
348 logipiq(send_fail, func, arg1, arg2, gd, target);
349 --gd->gd_intr_nesting_level;
350 crit_exit();
351 return(ENOENT);
353 windex = ip->ip_windex & MAXCPUFIFO_MASK;
354 ip->ip_info[windex].func = func;
355 ip->ip_info[windex].arg1 = arg1;
356 ip->ip_info[windex].arg2 = arg2;
357 cpu_sfence();
358 ++ip->ip_windex;
359 atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);
362 * This isn't a passive IPI, we still have to signal the target cpu.
364 if (atomic_poll_acquire_int(&target->gd_npoll)) {
365 logipiq(cpu_send, func, arg1, arg2, gd, target);
366 cpu_send_ipiq(target->gd_cpuid);
367 } else {
368 ++ipiq_avoided;
370 --gd->gd_intr_nesting_level;
371 crit_exit();
373 logipiq(send_end, func, arg1, arg2, gd, target);
374 return(0);
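/*
 * Hypothetical sketch of a lwkt_send_ipiq3_nowait() caller: it returns
 * ENOENT instead of spinning when the target FIFO is nearly full, so the
 * caller needs a fallback.  example_ipi_func is an assumed ipifunc3_t
 * callback, not part of this file.
 */
#if 0	/* illustrative only */
static void example_ipi_func(void *arg1, int arg2, struct intrframe *frame);

static void
example_try_notify(globaldata_t target)
{
	crit_enter();		/* the nowait variant asserts td_critcount */
	if (lwkt_send_ipiq3_nowait(target, example_ipi_func, NULL, 0) == ENOENT) {
		/* FIFO too full; fall back to the spinning send */
		lwkt_send_ipiq3(target, example_ipi_func, NULL, 0);
	}
	crit_exit();
}
#endif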
378 * deprecated, used only by fast int forwarding.
381 lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
383 return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
387 * Send a message to several target cpus. Typically used for scheduling.
388 * The message will not be sent to stopped cpus.
391 lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
393 int cpuid;
394 int count = 0;
396 mask &= ~stopped_cpus;
397 while (mask) {
398 cpuid = BSFCPUMASK(mask);
399 lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
400 mask &= ~CPUMASK(cpuid);
401 ++count;
403 return(count);
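/*
 * Hypothetical sketch of the mask variant: broadcast a call to every other
 * running cpu, the typical scheduling-style use.  example_ipi_func is an
 * assumed ipifunc3_t callback.
 */
#if 0	/* illustrative only */
static void example_ipi_func(void *arg1, int arg2, struct intrframe *frame);

static void
example_notify_others(void *arg1, int arg2)
{
	cpumask_t mask = mycpu->gd_other_cpus & smp_active_mask;

	lwkt_send_ipiq3_mask(mask, example_ipi_func, arg1, arg2);
}
#endif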
407 * Wait for the remote cpu to finish processing a function.
409 * YYY we have to enable interrupts and process the IPIQ while waiting
410 * for it to empty or we may deadlock with another cpu. Create a CPU_*()
411 * function to do this! YYY we really should 'block' here.
413 * MUST be called from a critical section. This routine may be called
414 * from an interrupt (for example, if an interrupt wakes a foreign thread
415 * up).
417 void
418 lwkt_wait_ipiq(globaldata_t target, int seq)
420 lwkt_ipiq_t ip;
421 int maxc = 100000000;
423 if (target != mycpu) {
424 ip = &mycpu->gd_ipiq[target->gd_cpuid];
425 if ((int)(ip->ip_xindex - seq) < 0) {
426 #if defined(__i386__)
427 unsigned int eflags = read_eflags();
428 #elif defined(__x86_64__)
429 unsigned long rflags = read_rflags();
430 #endif
431 cpu_enable_intr();
432 DEBUG_PUSH_INFO("wait_ipiq");
433 while ((int)(ip->ip_xindex - seq) < 0) {
434 crit_enter();
435 lwkt_process_ipiq();
436 crit_exit();
437 if (--maxc == 0)
438 kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n", mycpu->gd_cpuid, target->gd_cpuid, ip->ip_xindex - seq);
439 if (maxc < -1000000)
440 panic("LWKT_WAIT_IPIQ");
442 * xindex may be modified by another cpu, use a load fence
443 * to ensure that the loop does not use a speculative value
444 * (which may improve performance).
446 cpu_lfence();
448 DEBUG_POP_INFO();
449 #if defined(__i386__)
450 write_eflags(eflags);
451 #elif defined(__x86_64__)
452 write_rflags(rflags);
453 #endif
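/*
 * Hypothetical sketch pairing lwkt_send_ipiq3() with lwkt_wait_ipiq(): the
 * sequence number returned by the send is handed to the wait, which spins
 * (draining our own ipiq) until the target's xindex passes it.
 * example_ipi_func is an assumed ipifunc3_t callback.
 */
#if 0	/* illustrative only */
static void example_ipi_func(void *arg1, int arg2, struct intrframe *frame);

static void
example_send_and_wait(globaldata_t target)
{
	int seq;

	crit_enter();		/* lwkt_wait_ipiq() requires a critical section */
	seq = lwkt_send_ipiq3(target, example_ipi_func, NULL, 0);
	lwkt_wait_ipiq(target, seq);
	crit_exit();
}
#endif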
459 lwkt_seq_ipiq(globaldata_t target)
461 lwkt_ipiq_t ip;
463 ip = &mycpu->gd_ipiq[target->gd_cpuid];
464 return(ip->ip_windex);
468 * Called from IPI interrupt (like a fast interrupt), which has placed
469 * us in a critical section. The MP lock may or may not be held.
470 * May also be called from doreti or splz, or be reentrantly called
471 * indirectly through the ip_info[].func we run.
473 * There are two versions, one where no interrupt frame is available (when
 474  * called from the send code and from splz), and one where an interrupt
475 * frame is available.
477 * When the current cpu is mastering a cpusync we do NOT internally loop
478 * on the cpusyncq poll. We also do not re-flag a pending ipi due to
479 * the cpusyncq poll because this can cause doreti/splz to loop internally.
480 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
482 void
483 lwkt_process_ipiq(void)
485 globaldata_t gd = mycpu;
486 globaldata_t sgd;
487 lwkt_ipiq_t ip;
488 cpumask_t mask;
489 int n;
491 ++gd->gd_processing_ipiq;
492 again:
493 cpu_lfence();
494 mask = gd->gd_ipimask;
495 atomic_clear_cpumask(&gd->gd_ipimask, mask);
496 while (mask) {
497 n = BSFCPUMASK(mask);
498 if (n != gd->gd_cpuid) {
499 sgd = globaldata_find(n);
500 ip = sgd->gd_ipiq;
501 if (ip != NULL) {
502 while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
506 mask &= ~CPUMASK(n);
 510  * Process pending cpusyncs.  If the current thread has an active
 511  * cpusync we only run the list once and do not re-flag
512 * as the thread itself is processing its interlock.
514 if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
515 if (gd->gd_curthread->td_cscount == 0)
516 goto again;
517 /* need_ipiq(); do not reflag */
521 * Interlock to allow more IPI interrupts. Recheck ipimask after
522 * releasing gd_npoll.
524 if (gd->gd_ipimask)
525 goto again;
526 atomic_poll_release_int(&gd->gd_npoll);
527 cpu_mfence();
528 if (gd->gd_ipimask)
529 goto again;
530 --gd->gd_processing_ipiq;
533 void
534 lwkt_process_ipiq_frame(struct intrframe *frame)
536 globaldata_t gd = mycpu;
537 globaldata_t sgd;
538 lwkt_ipiq_t ip;
539 cpumask_t mask;
540 int n;
542 again:
543 cpu_lfence();
544 mask = gd->gd_ipimask;
545 atomic_clear_cpumask(&gd->gd_ipimask, mask);
546 while (mask) {
547 n = BSFCPUMASK(mask);
548 if (n != gd->gd_cpuid) {
549 sgd = globaldata_find(n);
550 ip = sgd->gd_ipiq;
551 if (ip != NULL) {
552 while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
556 mask &= ~CPUMASK(n);
558 if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
559 if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
560 if (gd->gd_curthread->td_cscount == 0)
561 goto again;
562 /* need_ipiq(); do not reflag */
567 * Interlock to allow more IPI interrupts. Recheck ipimask after
568 * releasing gd_npoll.
570 if (gd->gd_ipimask)
571 goto again;
572 atomic_poll_release_int(&gd->gd_npoll);
573 cpu_mfence();
574 if (gd->gd_ipimask)
575 goto again;
578 #if 0
579 static int iqticks[SMP_MAXCPU];
580 static int iqcount[SMP_MAXCPU];
581 #endif
582 #if 0
583 static int iqterm[SMP_MAXCPU];
584 #endif
586 static int
587 lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
588 struct intrframe *frame)
590 globaldata_t mygd = mycpu;
591 int ri;
592 int wi;
593 ipifunc3_t copy_func;
594 void *copy_arg1;
595 int copy_arg2;
597 #if 0
598 if (iqticks[mygd->gd_cpuid] != ticks) {
599 iqticks[mygd->gd_cpuid] = ticks;
600 iqcount[mygd->gd_cpuid] = 0;
602 if (++iqcount[mygd->gd_cpuid] > 3000000) {
603 kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
604 mygd->gd_cpuid,
605 mygd->gd_curthread->td_cscount,
606 mygd->gd_spinlocks);
607 iqcount[mygd->gd_cpuid] = 0;
608 #if 0
609 if (++iqterm[mygd->gd_cpuid] > 10)
610 panic("cpu %d ipiq maxed", mygd->gd_cpuid);
611 #endif
612 int i;
613 for (i = 0; i < ncpus; ++i) {
614 if (globaldata_find(i)->gd_infomsg)
615 kprintf(" %s", globaldata_find(i)->gd_infomsg);
617 kprintf("\n");
619 #endif
622 * Clear the originating core from our ipimask, we will process all
623 * incoming messages.
625 * Obtain the current write index, which is modified by a remote cpu.
626 * Issue a load fence to prevent speculative reads of e.g. data written
627 * by the other cpu prior to it updating the index.
629 KKASSERT(curthread->td_critcount);
630 wi = ip->ip_windex;
631 cpu_lfence();
632 ++mygd->gd_intr_nesting_level;
635 * NOTE: xindex is only updated after we are sure the function has
636 * finished execution. Beware lwkt_process_ipiq() reentrancy!
637 * The function may send an IPI which may block/drain.
639 * NOTE: Due to additional IPI operations that the callback function
640 * may make, it is possible for both rindex and windex to advance and
 641  * thus for rindex to advance past our cached windex.
643 * NOTE: A load fence is required to prevent speculative loads prior
644 * to the loading of ip_rindex. Even though stores might be
645 * ordered, loads are probably not. A memory fence is required
646 * to prevent reordering of the loads after the ip_rindex update.
648 * NOTE: Single pass only. Returns non-zero if the queue is not empty
649 * on return.
651 while (wi - (ri = ip->ip_rindex) > 0) {
652 ri &= MAXCPUFIFO_MASK;
653 cpu_lfence();
654 copy_func = ip->ip_info[ri].func;
655 copy_arg1 = ip->ip_info[ri].arg1;
656 copy_arg2 = ip->ip_info[ri].arg2;
657 cpu_mfence();
658 ++ip->ip_rindex;
659 KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
660 ((ri + 1) & MAXCPUFIFO_MASK));
661 logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
662 #ifdef INVARIANTS
663 if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
664 kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
665 mycpu->gd_cpuid,
666 copy_func, copy_arg1, copy_arg2,
667 #if defined(__i386__)
668 (frame ? (void *)frame->if_eip : NULL));
 669 #elif defined(__x86_64__)
670 (frame ? (void *)frame->if_rip : NULL));
671 #else
672 NULL);
673 #endif
675 #endif
676 copy_func(copy_arg1, copy_arg2, frame);
677 cpu_sfence();
678 ip->ip_xindex = ip->ip_rindex;
680 #ifdef PANIC_DEBUG
682 * Simulate panics during the processing of an IPI
684 if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
685 if (--panic_ipiq_count == 0) {
686 #ifdef DDB
687 Debugger("PANIC_DEBUG");
688 #else
689 panic("PANIC_DEBUG");
690 #endif
693 #endif
695 --mygd->gd_intr_nesting_level;
698 * Return non-zero if there is still more in the queue.
700 cpu_lfence();
701 return (ip->ip_rindex != ip->ip_windex);
704 static void
705 lwkt_sync_ipiq(void *arg)
707 volatile cpumask_t *cpumask = arg;
709 atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
710 if (*cpumask == 0)
711 wakeup(cpumask);
714 void
715 lwkt_synchronize_ipiqs(const char *wmesg)
717 volatile cpumask_t other_cpumask;
719 other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
720 lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
721 __DEVOLATILE(void *, &other_cpumask));
723 while (other_cpumask != 0) {
724 tsleep_interlock(&other_cpumask, 0);
725 if (other_cpumask != 0)
726 tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
730 #endif
733 * CPU Synchronization Support
735 * lwkt_cpusync_interlock() - Place specified cpus in a quiescent state.
736 * The current cpu is placed in a hard critical
737 * section.
739 * lwkt_cpusync_deinterlock() - Execute cs_func on specified cpus, including
740 * current cpu if specified, then return.
742 void
743 lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
745 struct lwkt_cpusync cs;
747 lwkt_cpusync_init(&cs, mask, func, arg);
748 lwkt_cpusync_interlock(&cs);
749 lwkt_cpusync_deinterlock(&cs);
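/*
 * Hypothetical usage sketch of the interlock/deinterlock pair, which lets
 * the caller modify shared state while the other cpus are quiesced (which
 * lwkt_cpusync_simple() above does not need).  example_invalidate is an
 * assumed cpusync_func_t callback.
 */
#if 0	/* illustrative only */
static void
example_invalidate(void *arg)
{
	/* runs on every cpu in the mask (and locally if included) */
}

static void
example_quiesce_and_update(cpumask_t mask, void *arg)
{
	struct lwkt_cpusync cs;

	lwkt_cpusync_init(&cs, mask, example_invalidate, arg);
	lwkt_cpusync_interlock(&cs);	/* remote cpus now spin, quiesced */
	/* ... safely update shared structures here ... */
	lwkt_cpusync_deinterlock(&cs);	/* run cs_func everywhere, release */
}
#endif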
753 void
754 lwkt_cpusync_interlock(lwkt_cpusync_t cs)
756 #ifdef SMP
757 #if 0
758 const char *smsg = "SMPSYNL";
759 #endif
760 globaldata_t gd = mycpu;
761 cpumask_t mask;
764 * mask acknowledge (cs_mack): 0->mask for stage 1
766 * mack does not include the current cpu.
768 mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
769 cs->cs_mack = 0;
770 crit_enter_id("cpusync");
771 if (mask) {
772 DEBUG_PUSH_INFO("cpusync_interlock");
773 ++ipiq_cscount;
774 ++gd->gd_curthread->td_cscount;
775 lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
776 logipiq2(sync_start, (long)mask);
777 #if 0
778 if (gd->gd_curthread->td_wmesg == NULL)
779 gd->gd_curthread->td_wmesg = smsg;
780 #endif
781 while (cs->cs_mack != mask) {
782 lwkt_process_ipiq();
783 cpu_pause();
785 #if 0
786 if (gd->gd_curthread->td_wmesg == smsg)
787 gd->gd_curthread->td_wmesg = NULL;
788 #endif
789 DEBUG_POP_INFO();
791 #else
792 cs->cs_mack = 0;
793 #endif
797 * Interlocked cpus have executed remote1 and are polling in remote2.
798 * To deinterlock we clear cs_mack and wait for the cpus to execute
799 * the func and set their bit in cs_mack again.
802 void
803 lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
805 globaldata_t gd = mycpu;
806 #ifdef SMP
807 #if 0
808 const char *smsg = "SMPSYNU";
809 #endif
810 cpumask_t mask;
813 * mask acknowledge (cs_mack): mack->0->mack for stage 2
815 * Clearing cpu bits for polling cpus in cs_mack will cause them to
816 * execute stage 2, which executes the cs_func(cs_data) and then sets
817 * their bit in cs_mack again.
819 * mack does not include the current cpu.
821 mask = cs->cs_mack;
822 cpu_ccfence();
823 cs->cs_mack = 0;
824 if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
825 cs->cs_func(cs->cs_data);
826 if (mask) {
827 DEBUG_PUSH_INFO("cpusync_deinterlock");
828 #if 0
829 if (gd->gd_curthread->td_wmesg == NULL)
830 gd->gd_curthread->td_wmesg = smsg;
831 #endif
832 while (cs->cs_mack != mask) {
833 lwkt_process_ipiq();
834 cpu_pause();
836 #if 0
837 if (gd->gd_curthread->td_wmesg == smsg)
838 gd->gd_curthread->td_wmesg = NULL;
839 #endif
840 DEBUG_POP_INFO();
842 * cpusyncq ipis may be left queued without the RQF flag set due to
843 * a non-zero td_cscount, so be sure to process any laggards after
844 * decrementing td_cscount.
846 --gd->gd_curthread->td_cscount;
847 lwkt_process_ipiq();
848 logipiq2(sync_end, (long)mask);
850 crit_exit_id("cpusync");
851 #else
852 if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
853 cs->cs_func(cs->cs_data);
854 #endif
857 #ifdef SMP
860 * helper IPI remote messaging function.
862 * Called on remote cpu when a new cpu synchronization request has been
 863  * sent to us.  Acknowledge the interlock by setting our bit in cs_mack,
 864  * then fall through to remote2, which requeues the request so we spin on it.
866 static void
867 lwkt_cpusync_remote1(lwkt_cpusync_t cs)
869 globaldata_t gd = mycpu;
871 atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
872 lwkt_cpusync_remote2(cs);
876 * helper IPI remote messaging function.
878 * Poll for the originator telling us to finish. If it hasn't, requeue
879 * our request so we spin on it.
881 static void
882 lwkt_cpusync_remote2(lwkt_cpusync_t cs)
884 globaldata_t gd = mycpu;
886 if ((cs->cs_mack & gd->gd_cpumask) == 0) {
887 if (cs->cs_func)
888 cs->cs_func(cs->cs_data);
889 atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
890 } else {
891 lwkt_ipiq_t ip;
892 int wi;
894 ip = &gd->gd_cpusyncq;
895 wi = ip->ip_windex & MAXCPUFIFO_MASK;
896 ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
897 ip->ip_info[wi].arg1 = cs;
898 ip->ip_info[wi].arg2 = 0;
899 cpu_sfence();
900 KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
901 ++ip->ip_windex;
902 if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
903 kprintf("cpu %d cm=%016jx %016jx f=%p\n",
904 gd->gd_cpuid,
905 (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
906 cs->cs_func);
911 #endif