2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.31 2005/11/04 21:16:57 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/ipl.h>
12 #include <machine/lock.h>
13 #include <machine/psl.h>
14 #include <machine/trap.h>
16 #include <i386/icu/icu.h>
17 #include <bus/isa/i386/isa.h>
23 #include <machine/smp.h>
24 #include "i386/isa/intr_machdep.h"
/*
 * Convert an absolute IRQ# into a bitmask.  Bit irq_num is the IRQ's
 * bit in the apic_imen interrupt-mask word (see MASK_IRQ/UNMASK_IRQ).
 */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/*
 * Make an index into the IO APIC from the IRQ#.  Redirection table
 * entries are two 32-bit registers each, starting at register 0x10.
 */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/* Prefix for atomic read-modify-write instructions (x86 lock prefix). */
#define MPLOCKED	lock ;
/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 *
 * NOTE(review): extraction fragment.  The #define head(s) these
 * backslash-continued lines belong to (PUSH_FRAME and friends) and
 * several interior lines are missing (original numbering jumps
 * 40->43, 44->46, 46->56, 61->64, 65->73).  The visible lines are
 * preserved verbatim; confirm against upstream before editing.
 */
	pushl	$0 ;			/* dummy error code */		\
	pushl	$0 ;			/* dummy trap type */		\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushfl ;			/* phys int frame / flags */	\
	pushl	%cs ;			/* phys int frame / cs */	\
	pushl	12(%esp) ;		/* original caller eip */	\
	pushl	$0 ;			/* dummy error code */		\
	pushl	$0 ;			/* dummy trap type */		\
	subl	$12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */	\
/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * (comment truncated in extraction)
 */
	addl	$2*4,%esp ;		/* dummy trap & error codes */	\
/*
 * Each int_to_apicintpin[] entry is 16 bytes: the I/O APIC base
 * address is kept at entry offset 8 and the redirection-table index
 * at offset 12 (MASK_IRQ/UNMASK_IRQ load these into %ecx/%eax).
 */
#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * MASK_IRQ -- mask the given IRQ in its I/O APIC redirection entry,
 * under the APIC interrupt-mask lock.  No-op if the IRQ is already
 * masked (bit already set in apic_imen).
 *
 * The I/O APIC is programmed indirectly: write the register index to
 * the select register at (%ecx), then read/modify/write the value
 * through the IOAPIC_WINDOW data register.
 *
 * NOTE(review): extraction gap -- original numbering jumps 92->96, so
 * the APIC_IMASK_UNLOCK that should pair with the APIC_IMASK_LOCK
 * taken below is not visible here.  Confirm against upstream.
 */
#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 *
 * NOTE(review): extraction gap -- the 9: local label targeted by the
 * jz below is not visible here (numbering jumps 103->107).  Confirm
 * against upstream.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
/*
 * Test to see if the source is currently masked, clear if so.
 *
 * UNMASK_IRQ -- clear the IRQ's bit in apic_imen and clear the mask
 * bit in its I/O APIC redirection entry, under the APIC interrupt-mask
 * lock (same indirect index/window access pattern as MASK_IRQ).
 *
 * NOTE(review): extraction gaps -- numbering jumps 109->112 and
 * 121->123, so the 7: local label targeted by the je below is not
 * visible here.  Confirm against upstream.
 */
#define UNMASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
	APIC_IMASK_UNLOCK ;						\
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt set its fpending bit and
 *   doreti.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its fpending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 *
 * NOTE(review): extraction fragment -- the vector entry (IDTVEC /
 * PUSH_FRAME), the conditional-branch labels after the TD_PRI
 * critical-section test, and the doreti tail are missing (numbering
 * jumps 142->147, 153->156, 159->162, 163->165, 167->173).  The
 * visible lines are preserved verbatim; confirm against upstream.
 */
#define FAST_INTR(irq_num, vec_name)					\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	UNMASK_IRQ(irq_num) ;						\
/*
 * Slow interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 *   running.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 *
 * NOTE(review): extraction fragment -- the vector entry, the branch
 * labels after the TD_PRI test, and the scheduling/doreti tail are
 * missing (numbering jumps 193->198, 198->200, 206->209, 211->214,
 * 215->225).  The visible lines are preserved verbatim; confirm
 * against upstream.
 */
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
	maybe_extra_ipending ;						\
	MASK_LEVEL_IRQ(irq_num) ;					\
	incl	PCPU(cnt) + V_INTR ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;		/* cpl do restore */			\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * (comment truncated in extraction)
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 *
 * NOTE(review): extraction fragment -- the vector entry (numbering
 * jumps 233->238) and the trailing return path (240->248) are missing.
 * Confirm against upstream.
 */
#define WRONGINTR(irq_num,vec_name)					\
	movl	$0, lapic_eoi ;		/* End Of Interrupt to APIC */	\
	/*pushl $irq_num ;*/						\
	/*call	do_wrongintr ;*/					\
/*
 * Handle "spurious INTerrupts".
 *
 * This is different than the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs.  See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.
 */
	/* No EOI cycle used here */
/*
 * Handle TLB shootdowns.
 *
 * NOTE(review): extraction fragment -- the vector entry and, notably,
 * the write of %eax back to %cr3 that would actually perform the TLB
 * flush are not visible here (numbering jumps 273->276).  As shown the
 * sequence only reads %cr3.  Confirm against upstream.
 */
	movl	%cr3, %eax		/* invalidate the TLB */
	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 * - Signals its receipt.
 * - Waits for permission to restart.
 * - Processing pending IPIQ events while waiting.
 * - Signals its restart.
 *
 * NOTE(review): extraction fragment -- the vector entry, the segment
 * selector load into %ax, the wait-loop labels/branches, and the
 * epilogue are missing (numbering jumps throughout: 289->301, 301->305,
 * 305->309, 315->319, 331->333, 333->337, 339->344, 347->352).  The
 * visible lines are preserved verbatim; confirm against upstream.
 */
	pushl	%ds			/* save current data segment */
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	call	CNAME(savectx)		/* Save process context */
	movl	PCPU(cpuid), %eax
	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * (comment truncated in extraction)
	 */
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
	andl	$~RQF_IPIQ,PCPU(reqflags)
	call	lwkt_smp_stopped
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */
	movl	CNAME(cpustop_restartfunc), %eax
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */
	popl	%ds			/* restore previous data segment */
/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 *
 * NOTE(review): extraction fragment -- the vector entry, the branch
 * after the TD_PRI critical-section test, and the doreti tail are
 * missing (numbering jumps 363->370, 374->376, 381->383, 383->387).
 * The visible lines are preserved verbatim; confirm against upstream.
 */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))
	movl	PCPU(curthread),%ebx
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)
	subl	$8,%esp			/* make same as interrupt frame */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	lwkt_process_ipiq_frame
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	PCPU(intr_nesting_level)
	pushl	$0			/* CPL for frame (REMOVED) */
	orl	$RQF_IPIQ,PCPU(reqflags)
/*
 * Instantiate the FAST_INTR() handler bodies, one per IRQ for all 24
 * I/O APIC interrupt pins (IRQ 0..23).
 */
	FAST_INTR(0,apic_fastintr0)
	FAST_INTR(1,apic_fastintr1)
	FAST_INTR(2,apic_fastintr2)
	FAST_INTR(3,apic_fastintr3)
	FAST_INTR(4,apic_fastintr4)
	FAST_INTR(5,apic_fastintr5)
	FAST_INTR(6,apic_fastintr6)
	FAST_INTR(7,apic_fastintr7)
	FAST_INTR(8,apic_fastintr8)
	FAST_INTR(9,apic_fastintr9)
	FAST_INTR(10,apic_fastintr10)
	FAST_INTR(11,apic_fastintr11)
	FAST_INTR(12,apic_fastintr12)
	FAST_INTR(13,apic_fastintr13)
	FAST_INTR(14,apic_fastintr14)
	FAST_INTR(15,apic_fastintr15)
	FAST_INTR(16,apic_fastintr16)
	FAST_INTR(17,apic_fastintr17)
	FAST_INTR(18,apic_fastintr18)
	FAST_INTR(19,apic_fastintr19)
	FAST_INTR(20,apic_fastintr20)
	FAST_INTR(21,apic_fastintr21)
	FAST_INTR(22,apic_fastintr22)
	FAST_INTR(23,apic_fastintr23)
	/* YYY what is this garbage? */

/*
 * Instantiate the SLOW_INTR() handler bodies for IRQ 0..23.  The
 * third macro argument (maybe_extra_ipending) is empty for all of
 * them.
 */
	SLOW_INTR(0,apic_slowintr0,)
	SLOW_INTR(1,apic_slowintr1,)
	SLOW_INTR(2,apic_slowintr2,)
	SLOW_INTR(3,apic_slowintr3,)
	SLOW_INTR(4,apic_slowintr4,)
	SLOW_INTR(5,apic_slowintr5,)
	SLOW_INTR(6,apic_slowintr6,)
	SLOW_INTR(7,apic_slowintr7,)
	SLOW_INTR(8,apic_slowintr8,)
	SLOW_INTR(9,apic_slowintr9,)
	SLOW_INTR(10,apic_slowintr10,)
	SLOW_INTR(11,apic_slowintr11,)
	SLOW_INTR(12,apic_slowintr12,)
	SLOW_INTR(13,apic_slowintr13,)
	SLOW_INTR(14,apic_slowintr14,)
	SLOW_INTR(15,apic_slowintr15,)
	SLOW_INTR(16,apic_slowintr16,)
	SLOW_INTR(17,apic_slowintr17,)
	SLOW_INTR(18,apic_slowintr18,)
	SLOW_INTR(19,apic_slowintr19,)
	SLOW_INTR(20,apic_slowintr20,)
	SLOW_INTR(21,apic_slowintr21,)
	SLOW_INTR(22,apic_slowintr22,)
	SLOW_INTR(23,apic_slowintr23,)
/*
 * Instantiate the WRONGINTR() catch-all bodies for IRQ 0..23; these
 * are programmed into APIC vectors that should never fire (see the
 * WRONGINTR macro above).
 */
	WRONGINTR(0,apic_wrongintr0)
	WRONGINTR(1,apic_wrongintr1)
	WRONGINTR(2,apic_wrongintr2)
	WRONGINTR(3,apic_wrongintr3)
	WRONGINTR(4,apic_wrongintr4)
	WRONGINTR(5,apic_wrongintr5)
	WRONGINTR(6,apic_wrongintr6)
	WRONGINTR(7,apic_wrongintr7)
	WRONGINTR(8,apic_wrongintr8)
	WRONGINTR(9,apic_wrongintr9)
	WRONGINTR(10,apic_wrongintr10)
	WRONGINTR(11,apic_wrongintr11)
	WRONGINTR(12,apic_wrongintr12)
	WRONGINTR(13,apic_wrongintr13)
	WRONGINTR(14,apic_wrongintr14)
	WRONGINTR(15,apic_wrongintr15)
	WRONGINTR(16,apic_wrongintr16)
	WRONGINTR(17,apic_wrongintr17)
	WRONGINTR(18,apic_wrongintr18)
	WRONGINTR(19,apic_wrongintr19)
	WRONGINTR(20,apic_wrongintr20)
	WRONGINTR(21,apic_wrongintr21)
	WRONGINTR(22,apic_wrongintr22)
	WRONGINTR(23,apic_wrongintr23)
/*
 * NOTE(review): extraction fragment -- the .data/.bss section
 * directive and the storage words backing these symbols are not
 * visible (numbering jumps 478->484 and 485->488); as shown, the
 * cpustop_restartfunc label has no visible storage following it.
 * Confirm against upstream before editing.
 */
	/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):

	.globl apic_pin_trigger