2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/isa.h>
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
25 /* convert an absolute IRQ# into a bitmask */
26 #define IRQ_LBIT(irq_num) (1 << (irq_num))
28 /* make an index into the IO APIC from the IRQ# */
29 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/* LOCK prefix for instructions that must be atomic on SMP */
32 #define MPLOCKED lock ;
38 * Push an interrupt frame in a format acceptable to doreti, reload
39 * the segment registers for the kernel.
42 pushl $
0 ;
/* dummy error code */ \
43 pushl $
0 ;
/* dummy trap type */ \
44 pushl $
0 ;
/* dummy xflags type */ \
46 pushl
%ds ;
/* save data and extra segments ... */ \
59 pushfl ;
/* phys int frame / flags */ \
60 pushl
%cs ;
/* phys int frame / cs */ \
61 pushl
12(%esp
) ;
/* original caller eip */ \
62 pushl $
0 ;
/* dummy error code */ \
63 pushl $
0 ;
/* dummy trap type */ \
64 pushl $
0 ;
/* dummy xflags type */ \
65 subl $
13*4,%esp ;
/* pushal + 4 seg regs (dummy) + CPL */ \
68 * Warning: POP_FRAME can only be used if there is no chance of a
69 * segment register being changed (e.g. by procfs), which is why syscalls
78 addl $
3*4,%esp ;
/* dummy xflags, trap & error codes */ \
/*
 * Accessors into the int_to_apicintpin[] array, indexed by absolute IRQ#.
 * Each entry is IOAPIC_IM_SIZE bytes; the IM_* offsets select the field:
 * the memory-mapped IO APIC base address for this pin, ...
 */
83 #define IOAPICADDR(irq_num) \
84 CNAME
(int_to_apicintpin
) + IOAPIC_IM_SIZE
* (irq_num
) + IOAPIC_IM_ADDR
/* ... the redirection-table register index for this pin, ... */
85 #define REDIRIDX(irq_num) \
86 CNAME
(int_to_apicintpin
) + IOAPIC_IM_SIZE
* (irq_num
) + IOAPIC_IM_ENTIDX
/* ... and the software flags word (IOAPIC_IM_FLAG_LEVEL/MASKED). */
87 #define IOAPICFLAGS(irq_num) \
88 CNAME
(int_to_apicintpin
) + IOAPIC_IM_SIZE
* (irq_num
) + IOAPIC_IM_FLAGS
/*
 * MASK_IRQ(irq_num): under the IMASK spinlock, set the software MASKED
 * flag and then set the mask bit in the pin's IO APIC redirection entry
 * (select the register via the index window, then OR IOART_INTMASK into
 * the data window).  Skips straight to the unlock if already masked.
 */
90 #define MASK_IRQ(irq_num) \
91 APIC_IMASK_LOCK ;
/* into critical reg */ \
92 testl $IOAPIC_IM_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
93 jne
7f ;
/* masked, don't mask */ \
94 orl $IOAPIC_IM_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
95 /* set the mask bit */ \
96 movl IOAPICADDR
(irq_num
), %ecx ;
/* ioapic addr */ \
97 movl REDIRIDX
(irq_num
), %eax ;
/* get the index */ \
98 movl
%eax
, (%ecx
) ;
/* write the index */ \
99 orl $IOART_INTMASK
,IOAPIC_WINDOW
(%ecx
) ;
/* set the mask */ \
100 7: ;
/* already masked */ \
101 APIC_IMASK_UNLOCK ; \
/*
 * MASK_LEVEL_IRQ(irq_num): mask only level-triggered sources.
 * NOTE(review): the "9:" branch-target line of this macro is elided
 * from this excerpt — confirm against the full file.
 */
104 * Test to see whether we are handling an edge or level triggered INT.
105 * Level-triggered INTs must still be masked as we don't clear the source,
106 * and the EOI cycle would cause redundant INTs to occur.
108 #define MASK_LEVEL_IRQ(irq_num) \
109 testl $IOAPIC_IM_FLAG_LEVEL
, IOAPICFLAGS
(irq_num
) ; \
110 jz
9f ;
/* edge, don't mask */ \
111 MASK_IRQ
(irq_num
) ; \
/*
 * UNMASK_IRQ(irq_num): under the IMASK spinlock, clear the software
 * MASKED flag and clear the mask bit in the pin's IO APIC redirection
 * entry (index window select, then AND out IOART_INTMASK).  No-op if
 * the source was not masked.
 * NOTE(review): lines between the #define and the lock (orig. 118-119)
 * are elided from this excerpt — confirm against the full file.
 */
115 * Test to see if the source is currently masked, clear if so.
117 #define UNMASK_IRQ(irq_num) \
120 APIC_IMASK_LOCK ;
/* into critical reg */ \
121 testl $IOAPIC_IM_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
122 je
7f ;
/* bit clear, not masked */ \
123 andl $~IOAPIC_IM_FLAG_MASKED
, IOAPICFLAGS
(irq_num
) ; \
124 /* clear mask bit */ \
125 movl IOAPICADDR
(irq_num
),%ecx ;
/* ioapic addr */ \
126 movl REDIRIDX
(irq_num
), %eax ;
/* get the index */ \
127 movl
%eax
,(%ecx
) ;
/* write the index */ \
128 andl $~IOART_INTMASK
,IOAPIC_WINDOW
(%ecx
) ;
/* clear the mask */ \
130 APIC_IMASK_UNLOCK ; \
136 * Fast interrupt call handlers run in the following sequence:
138 * - Push the trap frame required by doreti
139 * - Mask the interrupt and reenable its source
140 * - If we cannot take the interrupt set its fpending bit and
141 * doreti. Note that we cannot mess with mp_lock at all
142 * if we entered from a critical section!
143 * - If we can take the interrupt clear its fpending bit,
144 * call the handler, then unmask and doreti.
146 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
149 #define FAST_INTR(irq_num, vec_name) \
154 FAKE_MCOUNT
(15*4(%esp
)) ; \
155 MASK_LEVEL_IRQ
(irq_num
) ; \
156 movl $
0, lapic_eoi ; \
157 movl PCPU
(curthread
),%ebx ; \
158 movl $
0,%eax ;
/* CURRENT CPL IN FRAME (REMOVED) */ \
160 testl $
-1,TD_NEST_COUNT
(%ebx
) ; \
162 cmpl $TDPRI_CRIT
,TD_PRI
(%ebx
) ; \
165 /* in critical section, make interrupt pending */ \
166 /* set the pending bit and return, leave interrupt masked */ \
167 orl $IRQ_LBIT
(irq_num
),PCPU
(fpending
) ; \
168 orl $RQF_INTPEND
,PCPU
(reqflags
) ; \
171 /* clear pending bit, run handler */ \
172 andl $~IRQ_LBIT
(irq_num
),PCPU
(fpending
) ; \
174 pushl
%esp ;
/* pass frame by reference */ \
175 call ithread_fast_handler ;
/* returns 0 to unmask */ \
177 UNMASK_IRQ
(irq_num
) ; \
183 * Slow interrupt call handlers run in the following sequence:
185 * - Push the trap frame required by doreti.
186 * - Mask the interrupt and reenable its source.
187 * - If we cannot take the interrupt set its ipending bit and
188 * doreti. In addition to checking for a critical section
189 * and cpl mask we also check to see if the thread is still
190 * running. Note that we cannot mess with mp_lock at all
191 * if we entered from a critical section!
192 * - If we can take the interrupt clear its ipending bit
193 * and schedule the thread. Leave interrupts masked and doreti.
195 * Note that calls to sched_ithd() are made with interrupts enabled
196 * and outside a critical section. YYY sched_ithd may preempt us
197 * synchronously (fix interrupt stacking).
199 * YYY can cache gd base pointer instead of using hidden %fs
203 #define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending) \
208 maybe_extra_ipending ; \
210 MASK_LEVEL_IRQ
(irq_num
) ; \
211 incl PCPU
(cnt
) + V_INTR ; \
212 movl $
0, lapic_eoi ; \
213 movl PCPU
(curthread
),%ebx ; \
214 movl $
0,%eax ;
/* CURRENT CPL IN FRAME (REMOVED) */ \
215 pushl
%eax ;
/* cpl do restore */ \
216 testl $
-1,TD_NEST_COUNT
(%ebx
) ; \
218 cmpl $TDPRI_CRIT
,TD_PRI
(%ebx
) ; \
221 /* set the pending bit and return, leave the interrupt masked */ \
222 orl $IRQ_LBIT
(irq_num
), PCPU
(ipending
) ; \
223 orl $RQF_INTPEND
,PCPU
(reqflags
) ; \
226 /* set running bit, clear pending bit, run handler */ \
227 andl $~IRQ_LBIT
(irq_num
), PCPU
(ipending
) ; \
228 incl TD_NEST_COUNT
(%ebx
) ; \
234 decl TD_NEST_COUNT
(%ebx
) ; \
240 * Wrong interrupt call handlers. We program these into APIC vectors
241 * that should otherwise never occur. For example, we program the SLOW
242 * vector for irq N with this when we program the FAST vector with the
245 * XXX for now all we can do is EOI it. We can't call do_wrongintr
246 * (yet) because we could be in a critical section.
248 #define WRONGINTR(irq_num,vec_name) \
253 movl $
0, lapic_eoi ;
/* End Of Interrupt to APIC */ \
254 /*pushl $irq_num ;*/ \
255 /*call do_wrongintr ;*/ \
263 * Handle "spurious INTerrupts".
265 * This is different than the "spurious INTerrupt" generated by an
266 * 8259 PIC for missing INTs. See the APIC documentation for details.
267 * This routine should NOT do an 'EOI' cycle.
274 /* No EOI cycle used here */
280 * Handle TLB shootdowns.
288 movl
%cr3
, %eax
/* invalidate the TLB */
291 ss
/* stack segment, avoid %ds load */
292 movl $
0, lapic_eoi
/* End Of Interrupt to APIC */
299 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
301 * - Signals its receipt.
302 * - Waits for permission to restart.
303 * - Processing pending IPIQ events while waiting.
304 * - Signals its restart.
316 pushl
%ds
/* save current data segment */
320 mov
%ax
, %ds
/* use KERNEL data segment */
324 movl $
0, lapic_eoi
/* End Of Interrupt to APIC */
326 movl PCPU
(cpuid
), %eax
327 imull $PCB_SIZE
, %eax
328 leal CNAME
(stoppcbs
)(%eax
), %eax
330 call CNAME
(savectx
) /* Save process context */
334 movl PCPU
(cpuid
), %eax
337 * Indicate that we have stopped and loop waiting for permission
338 * to start again. We must still process IPI events while in a
342 btsl
%eax
, stopped_cpus
/* stopped_cpus |= (1<<id) */
344 andl $~RQF_IPIQ
,PCPU
(reqflags
)
346 call lwkt_smp_stopped
348 btl %eax
, started_cpus
/* while (!(started_cpus & (1<<id))) */
352 btrl
%eax
, started_cpus
/* started_cpus &= ~(1<<id) */
354 btrl
%eax
, stopped_cpus
/* stopped_cpus &= ~(1<<id) */
359 movl CNAME
(cpustop_restartfunc
), %eax
362 movl $
0, CNAME
(cpustop_restartfunc
) /* One-shot */
367 popl
%ds
/* restore previous data segment */
376 * For now just have one ipiq IPI, but what we really want is
377 * to have one for each source cpu so the APICs don't get stalled
378 * backlogging the requests.
385 movl $
0, lapic_eoi
/* End Of Interrupt to APIC */
386 FAKE_MCOUNT
(15*4(%esp
))
388 incl PCPU
(cnt
) + V_IPI
389 movl PCPU
(curthread
),%ebx
390 cmpl $TDPRI_CRIT
,TD_PRI
(%ebx
)
392 subl $
8,%esp
/* make same as interrupt frame */
393 pushl
%esp
/* pass frame by reference */
394 incl PCPU
(intr_nesting_level
)
395 addl $TDPRI_CRIT
,TD_PRI
(%ebx
)
396 call lwkt_process_ipiq_frame
397 subl $TDPRI_CRIT
,TD_PRI
(%ebx
)
398 decl PCPU
(intr_nesting_level
)
400 pushl $
0 /* CPL for frame (REMOVED) */
404 orl $RQF_IPIQ
,PCPU
(reqflags
)
414 movl $
0, lapic_eoi
/* End Of Interrupt to APIC */
415 FAKE_MCOUNT
(15*4(%esp
))
417 incl PCPU
(cnt
) + V_TIMER
418 movl PCPU
(curthread
),%ebx
419 cmpl $TDPRI_CRIT
,TD_PRI
(%ebx
)
421 testl $
-1,TD_NEST_COUNT
(%ebx
)
423 subl $
8,%esp
/* make same as interrupt frame */
424 pushl
%esp
/* pass frame by reference */
425 incl PCPU
(intr_nesting_level
)
426 addl $TDPRI_CRIT
,TD_PRI
(%ebx
)
427 call lapic_timer_process_frame
428 subl $TDPRI_CRIT
,TD_PRI
(%ebx
)
429 decl PCPU
(intr_nesting_level
)
431 pushl $
0 /* CPL for frame (REMOVED) */
435 orl $RQF_TIMER
,PCPU
(reqflags
)
/* Instantiate the fast-interrupt vector entry points, one per IRQ 0-23 */
443 FAST_INTR
(0,apic_fastintr0
)
444 FAST_INTR
(1,apic_fastintr1
)
445 FAST_INTR
(2,apic_fastintr2
)
446 FAST_INTR
(3,apic_fastintr3
)
447 FAST_INTR
(4,apic_fastintr4
)
448 FAST_INTR
(5,apic_fastintr5
)
449 FAST_INTR
(6,apic_fastintr6
)
450 FAST_INTR
(7,apic_fastintr7
)
451 FAST_INTR
(8,apic_fastintr8
)
452 FAST_INTR
(9,apic_fastintr9
)
453 FAST_INTR
(10,apic_fastintr10
)
454 FAST_INTR
(11,apic_fastintr11
)
455 FAST_INTR
(12,apic_fastintr12
)
456 FAST_INTR
(13,apic_fastintr13
)
457 FAST_INTR
(14,apic_fastintr14
)
458 FAST_INTR
(15,apic_fastintr15
)
459 FAST_INTR
(16,apic_fastintr16
)
460 FAST_INTR
(17,apic_fastintr17
)
461 FAST_INTR
(18,apic_fastintr18
)
462 FAST_INTR
(19,apic_fastintr19
)
463 FAST_INTR
(20,apic_fastintr20
)
464 FAST_INTR
(21,apic_fastintr21
)
465 FAST_INTR
(22,apic_fastintr22
)
466 FAST_INTR
(23,apic_fastintr23
)
/* Instantiate the slow-interrupt vector entry points, one per IRQ 0-23
 * (third macro argument, maybe_extra_ipending, is empty for all). */
468 /* YYY what is this garbage? */
470 SLOW_INTR
(0,apic_slowintr0
,)
471 SLOW_INTR
(1,apic_slowintr1
,)
472 SLOW_INTR
(2,apic_slowintr2
,)
473 SLOW_INTR
(3,apic_slowintr3
,)
474 SLOW_INTR
(4,apic_slowintr4
,)
475 SLOW_INTR
(5,apic_slowintr5
,)
476 SLOW_INTR
(6,apic_slowintr6
,)
477 SLOW_INTR
(7,apic_slowintr7
,)
478 SLOW_INTR
(8,apic_slowintr8
,)
479 SLOW_INTR
(9,apic_slowintr9
,)
480 SLOW_INTR
(10,apic_slowintr10
,)
481 SLOW_INTR
(11,apic_slowintr11
,)
482 SLOW_INTR
(12,apic_slowintr12
,)
483 SLOW_INTR
(13,apic_slowintr13
,)
484 SLOW_INTR
(14,apic_slowintr14
,)
485 SLOW_INTR
(15,apic_slowintr15
,)
486 SLOW_INTR
(16,apic_slowintr16
,)
487 SLOW_INTR
(17,apic_slowintr17
,)
488 SLOW_INTR
(18,apic_slowintr18
,)
489 SLOW_INTR
(19,apic_slowintr19
,)
490 SLOW_INTR
(20,apic_slowintr20
,)
491 SLOW_INTR
(21,apic_slowintr21
,)
492 SLOW_INTR
(22,apic_slowintr22
,)
493 SLOW_INTR
(23,apic_slowintr23
,)
/* Instantiate the "should never fire" vector entry points, one per IRQ 0-23 */
495 WRONGINTR
(0,apic_wrongintr0
)
496 WRONGINTR
(1,apic_wrongintr1
)
497 WRONGINTR
(2,apic_wrongintr2
)
498 WRONGINTR
(3,apic_wrongintr3
)
499 WRONGINTR
(4,apic_wrongintr4
)
500 WRONGINTR
(5,apic_wrongintr5
)
501 WRONGINTR
(6,apic_wrongintr6
)
502 WRONGINTR
(7,apic_wrongintr7
)
503 WRONGINTR
(8,apic_wrongintr8
)
504 WRONGINTR
(9,apic_wrongintr9
)
505 WRONGINTR
(10,apic_wrongintr10
)
506 WRONGINTR
(11,apic_wrongintr11
)
507 WRONGINTR
(12,apic_wrongintr12
)
508 WRONGINTR
(13,apic_wrongintr13
)
509 WRONGINTR
(14,apic_wrongintr14
)
510 WRONGINTR
(15,apic_wrongintr15
)
511 WRONGINTR
(16,apic_wrongintr16
)
512 WRONGINTR
(17,apic_wrongintr17
)
513 WRONGINTR
(18,apic_wrongintr18
)
514 WRONGINTR
(19,apic_wrongintr19
)
515 WRONGINTR
(20,apic_wrongintr20
)
516 WRONGINTR
(21,apic_wrongintr21
)
517 WRONGINTR
(22,apic_wrongintr22
)
518 WRONGINTR
(23,apic_wrongintr23
)
525 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
526 .globl stopped_cpus, started_cpus
532 .globl CNAME(cpustop_restartfunc)
533 CNAME
(cpustop_restartfunc
):