 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.38 2008/05/08 01:21:06 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/i386/isa.h>
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# (two 32-bit regs per pin) */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/* bus-lock prefix for MP-safe read-modify-write operations */
#define MPLOCKED	lock ;
/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 *
 * NOTE(review): the "#define PUSH_FRAME" header and the remaining
 * register/segment pushes are elided in this chunk -- confirm against
 * the full file before assembling.
 */
	pushl	$0 ;			/* dummy error code */		\
	pushl	$0 ;			/* dummy trap type */		\
	pushl	$0 ;			/* dummy xflags type */		\
	pushl	%ds ;			/* save data and extra segments ... */ \
/*
 * Build a dummy interrupt frame from a non-interrupt context so doreti
 * can unwind it.
 *
 * NOTE(review): the macro header for this sequence is elided in this
 * chunk -- confirm against the full file.
 */
	pushfl ;			/* phys int frame / flags */	\
	pushl	%cs ;			/* phys int frame / cs */	\
	pushl	12(%esp) ;		/* original caller eip */	\
	pushl	$0 ;			/* dummy error code */		\
	pushl	$0 ;			/* dummy trap type */		\
	pushl	$0 ;			/* dummy xflags type */		\
	subl	$13*4,%esp ;		/* pushal + 4 seg regs (dummy) + CPL */ \
/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * must go through doreti instead.
 *
 * NOTE(review): the "#define POP_FRAME" header and the register pops
 * are elided in this chunk.
 */
	addl	$3*4,%esp ;		/* dummy xflags, trap & error codes */ \
/*
 * Per-IRQ entries in int_to_apicintpin[] are 16 bytes; offset 8 holds
 * the ioapic register base, offset 12 the redirection-table index.
 * NOTE(review): layout assumed from the offsets used below -- confirm
 * against the struct definition in intr_machdep.h.
 */
#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * Mask the IRQ in the IO APIC (indirect access: write the redirection
 * index to the index register at (%ecx), then RMW the window register).
 *
 * NOTE(review): the visible text takes APIC_IMASK_LOCK but never
 * releases it; the release is restored here to match the pattern used
 * by UNMASK_IRQ below -- confirm against the full file.
 */
#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	APIC_IMASK_UNLOCK ;						\
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;									\
/*
 * Test to see if the source is currently masked, clear if so.
 * Same IO APIC indirect-access pattern as MASK_IRQ, clearing the
 * mask bit instead of setting it.
 */
#define UNMASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ; /* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	APIC_IMASK_UNLOCK ;						\
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt set its fpending bit and
 *   doreti.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its fpending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 *
 * NOTE(review): the IDTVEC entry, PUSH_FRAME, the local labels and
 * conditional jumps separating the "make pending" path from the
 * "run handler" path, and the final jmp doreti are elided in this
 * chunk -- confirm against the full file before assembling.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushl	%esp ;			/* pass frame by reference */	\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	UNMASK_IRQ(irq_num) ;						\
/*
 * Slow interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 *   running.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 *
 * NOTE(review): the IDTVEC entry, PUSH_FRAME, local labels, the
 * conditional jumps, the sched_ithd call and the final jmp doreti
 * are elided in this chunk -- confirm against the full file.
 */
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
	maybe_extra_ipending ;						\
	MASK_LEVEL_IRQ(irq_num) ;					\
	incl	PCPU(cnt) + V_INTR ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;		/* cpl do restore */			\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	incl	TD_NEST_COUNT(%ebx) ;					\
	decl	TD_NEST_COUNT(%ebx) ;					\
/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real handler.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name)					\
	movl	$0, lapic_eoi ;		/* End Of Interrupt to APIC */	\
	/*pushl $irq_num ;*/						\
	/*call do_wrongintr ;*/						\
 * Handle "spurious INTerrupts".
 *
 * This is different than the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs.  See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.

	/* No EOI cycle used here */
/*
 * Handle TLB shootdowns.  Reloading %cr3 with its own value flushes
 * the non-global TLB entries; a read alone does not invalidate
 * anything, so the write-back is restored here.
 *
 * NOTE(review): the Xinvltlb entry label, the %eax save/restore and
 * the iret are elided in this chunk -- confirm against the full file.
 */
	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 * - Signals its receipt.
 * - Waits for permission to restart.
 * - Processing pending IPIQ events while waiting.
 * - Signals its restart.
 *
 * NOTE(review): the Xcpustop entry, the %eax/%ecx saves, the load of
 * the kernel selector into %eax before "mov %ax, %ds", the spin-loop
 * branches around the started_cpus test, and the final iret are all
 * elided in this chunk -- confirm against the full file.
 */
	pushl	%ds			/* save current data segment */
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	call	CNAME(savectx)		/* Save process context */
	movl	PCPU(cpuid), %eax

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 */
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
	andl	$~RQF_IPIQ,PCPU(reqflags)
	call	lwkt_smp_stopped
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */
	movl	CNAME(cpustop_restartfunc), %eax
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */
	popl	%ds			/* restore previous data segment */
/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 *
 * NOTE(review): the Xipiq entry, PUSH_FRAME, the critical-section
 * branch that selects between processing now and setting RQF_IPIQ,
 * the %esp fixups after the call, and the jmp doreti are elided in
 * this chunk -- confirm against the full file.
 */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))
	movl	PCPU(curthread),%ebx
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)
	subl	$8,%esp			/* make same as interrupt frame */
	pushl	%esp			/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	lwkt_process_ipiq_frame
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	PCPU(intr_nesting_level)
	pushl	$0			/* CPL for frame (REMOVED) */
	orl	$RQF_IPIQ,PCPU(reqflags)
409 FAST_INTR
(0,apic_fastintr0
)
410 FAST_INTR
(1,apic_fastintr1
)
411 FAST_INTR
(2,apic_fastintr2
)
412 FAST_INTR
(3,apic_fastintr3
)
413 FAST_INTR
(4,apic_fastintr4
)
414 FAST_INTR
(5,apic_fastintr5
)
415 FAST_INTR
(6,apic_fastintr6
)
416 FAST_INTR
(7,apic_fastintr7
)
417 FAST_INTR
(8,apic_fastintr8
)
418 FAST_INTR
(9,apic_fastintr9
)
419 FAST_INTR
(10,apic_fastintr10
)
420 FAST_INTR
(11,apic_fastintr11
)
421 FAST_INTR
(12,apic_fastintr12
)
422 FAST_INTR
(13,apic_fastintr13
)
423 FAST_INTR
(14,apic_fastintr14
)
424 FAST_INTR
(15,apic_fastintr15
)
425 FAST_INTR
(16,apic_fastintr16
)
426 FAST_INTR
(17,apic_fastintr17
)
427 FAST_INTR
(18,apic_fastintr18
)
428 FAST_INTR
(19,apic_fastintr19
)
429 FAST_INTR
(20,apic_fastintr20
)
430 FAST_INTR
(21,apic_fastintr21
)
431 FAST_INTR
(22,apic_fastintr22
)
432 FAST_INTR
(23,apic_fastintr23
)
434 /* YYY what is this garbage? */
436 SLOW_INTR
(0,apic_slowintr0
,)
437 SLOW_INTR
(1,apic_slowintr1
,)
438 SLOW_INTR
(2,apic_slowintr2
,)
439 SLOW_INTR
(3,apic_slowintr3
,)
440 SLOW_INTR
(4,apic_slowintr4
,)
441 SLOW_INTR
(5,apic_slowintr5
,)
442 SLOW_INTR
(6,apic_slowintr6
,)
443 SLOW_INTR
(7,apic_slowintr7
,)
444 SLOW_INTR
(8,apic_slowintr8
,)
445 SLOW_INTR
(9,apic_slowintr9
,)
446 SLOW_INTR
(10,apic_slowintr10
,)
447 SLOW_INTR
(11,apic_slowintr11
,)
448 SLOW_INTR
(12,apic_slowintr12
,)
449 SLOW_INTR
(13,apic_slowintr13
,)
450 SLOW_INTR
(14,apic_slowintr14
,)
451 SLOW_INTR
(15,apic_slowintr15
,)
452 SLOW_INTR
(16,apic_slowintr16
,)
453 SLOW_INTR
(17,apic_slowintr17
,)
454 SLOW_INTR
(18,apic_slowintr18
,)
455 SLOW_INTR
(19,apic_slowintr19
,)
456 SLOW_INTR
(20,apic_slowintr20
,)
457 SLOW_INTR
(21,apic_slowintr21
,)
458 SLOW_INTR
(22,apic_slowintr22
,)
459 SLOW_INTR
(23,apic_slowintr23
,)
461 WRONGINTR
(0,apic_wrongintr0
)
462 WRONGINTR
(1,apic_wrongintr1
)
463 WRONGINTR
(2,apic_wrongintr2
)
464 WRONGINTR
(3,apic_wrongintr3
)
465 WRONGINTR
(4,apic_wrongintr4
)
466 WRONGINTR
(5,apic_wrongintr5
)
467 WRONGINTR
(6,apic_wrongintr6
)
468 WRONGINTR
(7,apic_wrongintr7
)
469 WRONGINTR
(8,apic_wrongintr8
)
470 WRONGINTR
(9,apic_wrongintr9
)
471 WRONGINTR
(10,apic_wrongintr10
)
472 WRONGINTR
(11,apic_wrongintr11
)
473 WRONGINTR
(12,apic_wrongintr12
)
474 WRONGINTR
(13,apic_wrongintr13
)
475 WRONGINTR
(14,apic_wrongintr14
)
476 WRONGINTR
(15,apic_wrongintr15
)
477 WRONGINTR
(16,apic_wrongintr16
)
478 WRONGINTR
(17,apic_wrongintr17
)
479 WRONGINTR
(18,apic_wrongintr18
)
480 WRONGINTR
(19,apic_wrongintr19
)
481 WRONGINTR
(20,apic_wrongintr20
)
482 WRONGINTR
(21,apic_wrongintr21
)
483 WRONGINTR
(22,apic_wrongintr22
)
484 WRONGINTR
(23,apic_wrongintr23
)
491 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
492 .globl stopped_cpus, started_cpus
498 .globl CNAME(cpustop_restartfunc)
499 CNAME
(cpustop_restartfunc
):
502 .globl apic_pin_trigger