vkernel - Sync to recent API changes
[dragonfly.git] / sys/platform/vkernel64/platform/machintr.c
blob ee9c59498a4b055d490007fc5fbf50a70d2fa3f9

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/machintr.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/globaldata.h>
#include <sys/interrupt.h>
#include <stdio.h>
#include <signal.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <sys/thread2.h>

#include <unistd.h>

/*
 * Interrupt Subsystem ABI
 */

static void dummy_intr_disable(int);
static void dummy_intr_enable(int);
static void dummy_intr_setup(int, int);
static void dummy_intr_teardown(int);
static int dummy_legacy_intr_cpuid(int);
static void dummy_finalize(void);
static void dummy_intrcleanup(void);
static void dummy_stabilize(void);

struct machintr_abi MachIntrABI = {
	MACHINTR_GENERIC,
	.intr_disable = dummy_intr_disable,
	.intr_enable = dummy_intr_enable,
	.intr_setup = dummy_intr_setup,
	.intr_teardown = dummy_intr_teardown,
	.legacy_intr_cpuid = dummy_legacy_intr_cpuid,

	.finalize = dummy_finalize,
	.cleanup = dummy_intrcleanup,
	.stabilize = dummy_stabilize
};
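
/*
 * The hooks below are intentionally empty.  The virtual kernel runs as a
 * host process and has no physical interrupt controller to program;
 * maskable "interrupts" arrive as host signals (see cpu_disable_intr()
 * below) and are dispatched through signalintr() and splz().
 */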

static void
dummy_intr_disable(int intr)
{
}

static void
dummy_intr_enable(int intr)
{
}

static void
dummy_intr_setup(int intr, int flags)
{
}

static void
dummy_intr_teardown(int intr)
{
}

static void
dummy_finalize(void)
{
}

static void
dummy_intrcleanup(void)
{
}

static void
dummy_stabilize(void)
{
}

static int
dummy_legacy_intr_cpuid(int irq __unused)
{
	return 0;
}

/*
 * Process pending interrupts
 */
void
splz(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;
	int irq;

	while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND|
				     RQF_TIMER|RQF_KQUEUE)) {
		crit_enter_quick(td);
		if (gd->mi.gd_reqflags & RQF_IPIQ) {
			/* process queued inter-processor messages */
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ);
			atomic_swap_int(&gd->mi.gd_npoll, 0);
			lwkt_process_ipiq();
		}
		if (gd->mi.gd_reqflags & RQF_INTPEND) {
			/*
			 * Dispatch pending soft interrupts, then pending
			 * fast ("hard") interrupts.  ffs() returns the
			 * 1-based index of the lowest set bit, hence the
			 * decrement before the bit is cleared.
			 */
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND);
			while ((irq = ffs(gd->gd_spending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_spending, 1 << irq);
				irq += FIRST_SOFTINT;
				sched_ithd_soft(irq);
			}
			while ((irq = ffs(gd->gd_fpending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_fpending, 1 << irq);
				sched_ithd_hard_virtual(irq);
			}
		}
		if (gd->mi.gd_reqflags & RQF_TIMER) {
			/* virtual timer interrupt */
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_TIMER);
			vktimer_intr(NULL);
		}
		if (gd->mi.gd_reqflags & RQF_KQUEUE) {
			/* host kqueue events */
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_KQUEUE);
			kqueue_intr(NULL);
		}
		crit_exit_noyield(td);
	}
}
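
/*
 * Note: splz() loops until every request flag has been cleared; it is
 * typically reached when a thread leaves a critical section or prepares
 * to return to user mode with gd_reqflags still set.  A caller-side
 * check might look like this (illustrative sketch only, not part of
 * this file):
 *
 *	if (mdcpu->mi.gd_reqflags)
 *		splz();
 */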

/*
 * Allows an unprotected signal handler or mailbox to signal an interrupt
 *
 * For sched_ithd_hard_virtual() to properly preempt via lwkt_schedule() we
 * cannot enter a critical section here.  We use td_nest_count instead.
 */
void
signalintr(int intr)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;

	if (td->td_critcount || td->td_nest_count) {
		/*
		 * Cannot dispatch now; record the interrupt as pending.
		 * splz() picks it up when the critical section or nested
		 * dispatch ends.
		 */
		atomic_set_int_nonlocked(&gd->gd_fpending, 1 << intr);
		atomic_set_int(&gd->mi.gd_reqflags, RQF_INTPEND);
		umtx_wakeup(&gd->mi.gd_reqflags, 0);
	} else {
		/*
		 * Dispatch immediately, using td_nest_count rather than
		 * a critical section to prevent recursion.
		 */
		++td->td_nest_count;
		cpu_ccfence();
		atomic_clear_int(&gd->gd_fpending, 1 << intr);
		sched_ithd_hard_virtual(intr);
		cpu_ccfence();
		--td->td_nest_count;
	}
}
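
/*
 * Usage sketch (hypothetical, for illustration only): a host signal
 * handler forwarding an I/O event as a fast interrupt could look like:
 *
 *	static void
 *	io_sighandler(int signo)
 *	{
 *		signalintr(0);		// hypothetical irq index
 *	}
 *
 * The deferred path above makes this safe even when the signal arrives
 * while the current thread is inside a critical section.
 */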

/*
 * Must block any signal normally handled as a maskable interrupt.
 */
void
cpu_disable_intr(void)
{
	sigblock(sigmask(SIGALRM)|sigmask(SIGIO)|sigmask(SIGUSR1)|
		 sigmask(SIGURG));
}
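
/*
 * Note: sigblock()/sigsetmask() are the traditional BSD signal-mask
 * calls; blocking SIGALRM, SIGIO, SIGUSR1 and SIGURG here is the
 * vkernel's rough equivalent of executing "cli" on real hardware, and
 * sigsetmask(0) below the equivalent of "sti".
 */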

void
cpu_enable_intr(void)
{
	sigsetmask(0);
}

void
cpu_mask_all_signals(void)
{
	sigblock(sigmask(SIGALRM)|sigmask(SIGIO)|sigmask(SIGQUIT)|
		 sigmask(SIGUSR1)|sigmask(SIGTERM)|sigmask(SIGWINCH)|
		 sigmask(SIGUSR2)|sigmask(SIGURG));
}

void
cpu_unmask_all_signals(void)
{
	sigsetmask(0);
}