2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
7 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14 #include <machine/segments.h>
16 #include <machine_base/icu/icu.h>
17 #include <bus/isa/isa.h>
22 #include <machine_base/apic/ioapic_ipl.h>
23 #include <machine/intr_machdep.h>
/* convert an absolute IRQ# into a 64-bit bitmask (bit within an ipending word) */
#define IRQ_LBIT(irq_num)	(1UL << ((irq_num) & 0x3f))

/* convert an absolute IRQ# into the shift count within its ipending word */
#define IRQ_SBITS(irq_num)	((irq_num) & 0x3f)

/* convert an absolute IRQ# into gd_ipending index */
#define IRQ_LIDX(irq_num)	((irq_num) >> 6)
/* MPLOCKED: emit the 'lock' prefix so the following RMW memory op is atomic on SMP */
35 #define MPLOCKED lock ;
/*
 * APIC_PUSH_FRAME: build the trap frame doreti expects.  PUSH_FRAME
 * saves the 15 general registers plus space for 5 extra slots; the
 * slots a CPU trap would normally fill (xflags, trapno, addr, flags,
 * err) are explicitly zeroed here because an APIC interrupt supplies
 * no trap number or error code.
 */
37 #define APIC_PUSH_FRAME \
38 PUSH_FRAME ;
/* 15 regs + space for 5 extras */ \
39 movq $
0,TF_XFLAGS
(%rsp
) ; \
40 movq $
0,TF_TRAPNO
(%rsp
) ; \
41 movq $
0,TF_ADDR
(%rsp
) ; \
42 movq $
0,TF_FLAGS
(%rsp
) ; \
43 movq $
0,TF_ERR
(%rsp
) ; \
47 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
48 * segment register being changed (e.g. by procfs), which is why syscalls
51 #define APIC_POP_FRAME \
/*
 * Per-IRQ accessors into the ioapic_irqs[] array.  Each entry is
 * IOAPIC_IRQI_SIZE bytes and holds the IO APIC register base address
 * (IRQI_ADDR), the redirection-table index (IRQI_IDX) and flag bits
 * (IRQI_FLAGS) for one absolute IRQ#.
 */
54 #define IOAPICADDR(irq_num) \
55 CNAME
(ioapic_irqs
) + IOAPIC_IRQI_SIZE
* (irq_num
) + IOAPIC_IRQI_ADDR
56 #define REDIRIDX(irq_num) \
57 CNAME
(ioapic_irqs
) + IOAPIC_IRQI_SIZE
* (irq_num
) + IOAPIC_IRQI_IDX
58 #define IOAPICFLAGS(irq_num) \
59 CNAME
(ioapic_irqs
) + IOAPIC_IRQI_SIZE
* (irq_num
) + IOAPIC_IRQI_FLAGS
/*
 * MASK_IRQ(irq_num): mask the IRQ at its IO APIC, under the IMASK
 * spinlock.  If FLAG_MASKED is already set, skip the hardware access
 * (jne 7f).  Otherwise record FLAG_MASKED, write the redirection
 * table index to the IO APIC index register, then set IOART_INTMASK
 * in the data window.  Clobbers %rcx, %eax, flags.
 */
61 #define MASK_IRQ(irq_num) \
62 IOAPIC_IMASK_LOCK ;
/* into critical reg */ \
63 testl $IOAPIC_IRQI_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
64 jne
7f ;
/* masked, don't mask */ \
65 orl $IOAPIC_IRQI_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
66 /* set the mask bit */ \
67 movq IOAPICADDR
(irq_num
), %rcx ;
/* ioapic addr */ \
68 movl REDIRIDX
(irq_num
), %eax ;
/* get the index */ \
69 movl
%eax
, (%rcx
) ;
/* write the index */ \
70 orl $IOART_INTMASK
,IOAPIC_WINDOW
(%rcx
) ;
/* set the mask */ \
71 7: ;
/* already masked */ \
72 IOAPIC_IMASK_UNLOCK ; \
75 * Test to see whether we are handling an edge or level triggered INT.
76 * Level-triggered INTs must still be masked as we don't clear the source,
77 * and the EOI cycle would cause redundant INTs to occur.
/*
 * MASK_LEVEL_IRQ(irq_num): mask only level-triggered sources; an
 * edge-triggered source branches past the mask (jz 9f) since its
 * EOI cannot re-trigger it.
 */
79 #define MASK_LEVEL_IRQ(irq_num) \
80 testl $IOAPIC_IRQI_FLAG_LEVEL
, IOAPICFLAGS
(irq_num
) ; \
81 jz
9f ;
/* edge, don't mask */ \
86 * Test to see if the source is currently masked, clear if so.
/*
 * UNMASK_IRQ(irq_num): if the IRQ is currently soft-masked
 * (FLAG_MASKED set), clear the flag and clear IOART_INTMASK in the
 * IO APIC data window, all under the IMASK spinlock; if not masked,
 * branch around the hardware access (je 7f).  Clobbers %rcx, %eax,
 * flags.
 */
88 #define UNMASK_IRQ(irq_num) \
91 IOAPIC_IMASK_LOCK ;
/* into critical reg */ \
92 testl $IOAPIC_IRQI_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
93 je
7f ;
/* bit clear, not masked */ \
94 andl $~IOAPIC_IRQI_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
95 /* clear mask bit */ \
96 movq IOAPICADDR
(irq_num
),%rcx ;
/* ioapic addr */ \
97 movl REDIRIDX
(irq_num
), %eax ;
/* get the index */ \
98 movl
%eax
,(%rcx
) ;
/* write the index */ \
99 andl $~IOART_INTMASK
,IOAPIC_WINDOW
(%rcx
) ;
/* clear the mask */ \
101 IOAPIC_IMASK_UNLOCK ; \
105 * Interrupt call handlers run in the following sequence:
107 * - Push the trap frame required by doreti
108 * - Mask the interrupt and reenable its source
109 * - If we cannot take the interrupt set its ipending bit and
111 * - If we can take the interrupt clear its ipending bit,
112 * call the handler, then unmask and doreti.
114 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * INTR_HANDLER(irq_num): generates the ioapic_intrNN entry point.
 * Sequence: mask a level-triggered source, EOI the local APIC, then
 * check curthread.  If the thread is nested or in a critical section
 * the interrupt cannot be taken now: set the IRQ's bit in the pcpu
 * ipending[] array plus RQF_INTPEND and return with the source left
 * masked.  Otherwise clear the ipending bit, push irq_num (trapframe
 * -> intrframe), run ithread_fast_handler inside a critical section,
 * pop back to a trapframe and unmask the source.
 */
117 #define INTR_HANDLER(irq_num) \
120 IDTVEC
(ioapic_intr
##irq_num) ; \
122 FAKE_MCOUNT
(TF_RIP
(%rsp
)) ; \
123 MASK_LEVEL_IRQ
(irq_num
) ; \
125 movl $
0, LA_EOI
(%rax
) ; \
126 movq PCPU
(curthread
),%rbx ; \
127 testl $
-1,TD_NEST_COUNT
(%rbx
) ; \
129 testl $
-1,TD_CRITCOUNT
(%rbx
) ; \
132 /* in critical section, make interrupt pending */ \
133 /* set the pending bit and return, leave interrupt masked */ \
135 shlq $IRQ_SBITS
(irq_num
),%rcx ; \
136 movq $IRQ_LIDX
(irq_num
),%rdx ; \
137 orq
%rcx
,PCPU_E8
(ipending
,%rdx
) ; \
138 orl $RQF_INTPEND
,PCPU
(reqflags
) ; \
141 /* clear pending bit, run handler */ \
143 shlq $IRQ_SBITS
(irq_num
),%rcx ; \
145 movq $IRQ_LIDX
(irq_num
),%rdx ; \
146 andq
%rcx
,PCPU_E8
(ipending
,%rdx
) ; \
147 pushq $irq_num ;
/* trapframe -> intrframe */ \
148 movq
%rsp
, %rdi ;
/* pass frame by reference */ \
149 incl TD_CRITCOUNT
(%rbx
) ; \
151 call ithread_fast_handler ;
/* returns 0 to unmask */ \
152 decl TD_CRITCOUNT
(%rbx
) ; \
153 addq $
8, %rsp ;
/* intrframe -> trapframe */ \
154 UNMASK_IRQ
(irq_num
) ; \
160 * Handle "spurious INTerrupts".
162 * NOTE: This is different than the "spurious INTerrupt" generated by an
163 * 8259 PIC for missing INTs. See the APIC documentation for details.
164 * This routine should NOT do an 'EOI' cycle.
166 * NOTE: Even though we don't do anything here we must still swapgs if
167 * coming from a user frame in case the iretq faults... just use
168 * the nominal APIC_PUSH_FRAME sequence to get it done.
175 /* No EOI cycle used here */
176 FAKE_MCOUNT
(TF_RIP
(%rsp
))
182 * Handle TLB shootdowns.
184 * NOTE: interrupts are left disabled.
192 movl $
0, LA_EOI
(%rax
) /* End Of Interrupt to APIC */
193 FAKE_MCOUNT
(TF_RIP
(%rsp
))
194 incl PCPU
(cnt
) + V_IPI
195 subq $
8,%rsp
/* make same as interrupt frame */
196 movq
%rsp
,%rdi
/* pass frame by reference */
198 addq $
8,%rsp
/* turn into trapframe */
201 jmp doreti
/* doreti b/c intrs enabled */
204 * Handle sniffs - sniff %rip and %rsp.
212 movl $
0, LA_EOI
(%rax
) /* End Of Interrupt to APIC */
213 FAKE_MCOUNT
(TF_RIP
(%rsp
))
214 incl PCPU
(cnt
) + V_IPI
215 movq TF_RIP
(%rsp
),%rax
216 movq
%rax
,PCPU
(sample_pc
)
217 movq TF_RSP
(%rsp
),%rax
218 movq
%rax
,PCPU
(sample_sp
)
224 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
226 * - We cannot call doreti
227 * - Signals its receipt.
228 * - Waits for permission to restart.
229 * - Processing pending IPIQ events while waiting.
230 * - Signals its restart.
239 movl $
0, LA_EOI
(%rax
) /* End Of Interrupt to APIC */
241 movl PCPU
(cpuid
), %eax
242 imull $PCB_SIZE
, %eax
243 leaq CNAME
(stoppcbs
), %rdi
245 call CNAME
(savectx
) /* Save process context */
248 * Indicate that we have stopped and loop waiting for permission
249 * to start again. We must still process IPI events while in a
252 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
253 * (e.g. Xtimer, Xinvltlb).
255 #if CPUMASK_ELEMENTS != 4
256 #error "assembly incompatible with cpumask_t"
258 movq PCPU
(cpumask
)+0,%rax
/* stopped_cpus |= 1 << cpuid */
259 MPLOCKED orq
%rax
, stopped_cpus+
0
260 movq PCPU
(cpumask
)+8,%rax
261 MPLOCKED orq
%rax
, stopped_cpus+
8
262 movq PCPU
(cpumask
)+16,%rax
263 MPLOCKED orq
%rax
, stopped_cpus+
16
264 movq PCPU
(cpumask
)+24,%rax
265 MPLOCKED orq
%rax
, stopped_cpus+
24
267 movq PCPU
(curthread
),%rbx
268 incl PCPU
(intr_nesting_level
)
269 incl TD_CRITCOUNT
(%rbx
)
272 andl $~RQF_IPIQ
,PCPU
(reqflags
)
273 call lwkt_smp_stopped
277 movq started_cpus+
0,%rax
/* while (!(started_cpus & (1<<id))) */
278 andq PCPU
(cpumask
)+0,%rax
280 movq started_cpus+
8,%rax
281 andq PCPU
(cpumask
)+8,%rax
283 movq started_cpus+
16,%rax
284 andq PCPU
(cpumask
)+16,%rax
286 movq started_cpus+
24,%rax
287 andq PCPU
(cpumask
)+24,%rax
292 movq PCPU
(other_cpus
)+0,%rax
/* started_cpus &= ~(1 << cpuid) */
293 MPLOCKED andq
%rax
, started_cpus+
0
294 movq PCPU
(other_cpus
)+8,%rax
295 MPLOCKED andq
%rax
, started_cpus+
8
296 movq PCPU
(other_cpus
)+16,%rax
297 MPLOCKED andq
%rax
, started_cpus+
16
298 movq PCPU
(other_cpus
)+24,%rax
299 MPLOCKED andq
%rax
, started_cpus+
24
301 movq PCPU
(other_cpus
)+0,%rax
/* stopped_cpus &= ~(1 << cpuid) */
302 MPLOCKED andq
%rax
, stopped_cpus+
0
303 movq PCPU
(other_cpus
)+8,%rax
304 MPLOCKED andq
%rax
, stopped_cpus+
8
305 movq PCPU
(other_cpus
)+16,%rax
306 MPLOCKED andq
%rax
, stopped_cpus+
16
307 movq PCPU
(other_cpus
)+24,%rax
308 MPLOCKED andq
%rax
, stopped_cpus+
24
313 movq CNAME
(cpustop_restartfunc
), %rax
316 movq $
0, CNAME
(cpustop_restartfunc
) /* One-shot */
320 decl TD_CRITCOUNT
(%rbx
)
321 decl PCPU
(intr_nesting_level
)
327 * For now just have one ipiq IPI, but what we really want is
328 * to have one for each source cpu so the APICs don't get stalled
329 * backlogging the requests.
337 movl $
0, LA_EOI
(%rax
) /* End Of Interrupt to APIC */
338 FAKE_MCOUNT
(TF_RIP
(%rsp
))
340 incl PCPU
(cnt
) + V_IPI
341 movq PCPU
(curthread
),%rbx
342 testl $
-1,TD_CRITCOUNT
(%rbx
)
344 subq $
8,%rsp
/* make same as interrupt frame */
345 movq
%rsp
,%rdi
/* pass frame by reference */
346 incl PCPU
(intr_nesting_level
)
347 incl TD_CRITCOUNT
(%rbx
)
350 xchgl
%eax
,PCPU
(npoll
) /* (atomic op) allow another Xipi */
351 call lwkt_process_ipiq_frame
352 decl TD_CRITCOUNT
(%rbx
)
353 decl PCPU
(intr_nesting_level
)
354 addq $
8,%rsp
/* turn into trapframe */
358 orl $RQF_IPIQ
,PCPU
(reqflags
)
369 movl $
0, LA_EOI
(%rax
) /* End Of Interrupt to APIC */
370 FAKE_MCOUNT
(TF_RIP
(%rsp
))
372 subq $
8,%rsp
/* make same as interrupt frame */
373 movq
%rsp
,%rdi
/* pass frame by reference */
374 call pcpu_timer_always
375 addq $
8,%rsp
/* turn into trapframe */
377 incl PCPU
(cnt
) + V_TIMER
378 movq TF_RIP
(%rsp
),%rbx
/* sample addr before checking crit */
379 movq
%rbx
,PCPU
(sample_pc
)
380 movq PCPU
(curthread
),%rbx
381 testl $
-1,TD_CRITCOUNT
(%rbx
)
383 testl $
-1,TD_NEST_COUNT
(%rbx
)
385 subq $
8,%rsp
/* make same as interrupt frame */
386 movq
%rsp
,%rdi
/* pass frame by reference */
387 incl PCPU
(intr_nesting_level
)
388 incl TD_CRITCOUNT
(%rbx
)
390 call pcpu_timer_process_frame
391 decl TD_CRITCOUNT
(%rbx
)
392 decl PCPU
(intr_nesting_level
)
393 addq $
8,%rsp
/* turn into trapframe */
397 orl $RQF_TIMER
,PCPU
(reqflags
)
599 #if CPUMASK_ELEMENTS != 4
600 #error "assembly incompatible with cpumask_t"
602 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
603 .globl stopped_cpus, started_cpus
615 .globl CNAME(cpustop_restartfunc)
616 CNAME
(cpustop_restartfunc
):