/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.31 2005/11/04 21:16:57 dillon Exp $
 */

#include "use_npx.h"
#include "opt_auto_eoi.h"

#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <i386/icu/icu.h>
#include <bus/isa/i386/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
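
/*
 * Worked example (illustrative, not from the original source): for
 * irq_num == 9, IRQ_LBIT(9) == 0x200 and REDTBL_IDX(9) == 0x10 + 9*2
 * == 0x22, the I/O APIC register index of the low dword of redirection
 * table entry 9 (each entry is a pair of 32-bit registers starting at
 * register 0x10).
 */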

#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME \
        pushl $0 ;	/* dummy error code */ \
        pushl $0 ;	/* dummy trap type */ \
        pushal ; \
        pushl %ds ;	/* save data and extra segments ... */ \
        pushl %es ; \
        pushl %fs ; \
        mov $KDSEL,%ax ; \
        mov %ax,%ds ; \
        mov %ax,%es ; \
        mov $KPSEL,%ax ; \
        mov %ax,%fs ; \

#define PUSH_DUMMY \
        pushfl ;	/* phys int frame / flags */ \
        pushl %cs ;	/* phys int frame / cs */ \
        pushl 12(%esp) ; /* original caller eip */ \
        pushl $0 ;	/* dummy error code */ \
        pushl $0 ;	/* dummy trap type */ \
        subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME \
        popl %fs ; \
        popl %es ; \
        popl %ds ; \
        popal ; \
        addl $2*4,%esp ;	/* dummy trap & error codes */ \

#define POP_DUMMY \
        addl $17*4,%esp ; \
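
/*
 * Size check (illustrative note, not from the original source):
 * PUSH_DUMMY pushes 5 dwords (flags, cs, eip, error code, trap type)
 * and reserves 12 more with the subl (8 pushal slots + 3 segment
 * slots + 1 cpl slot), so POP_DUMMY pops all 17 dwords in one addl.
 */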

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
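
/*
 * Note (assumption, not stated in this file): these offsets hard-code
 * the layout of the C-side int_to_apicintpin[] records as 16 bytes
 * each, with the mapped I/O APIC address at byte offset 8 and the
 * redirection register index at byte offset 12; they must match the
 * structure defined in the machine-dependent interrupt code.
 */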

#define MASK_IRQ(irq_num) \
        APIC_IMASK_LOCK ;	/* into critical reg */ \
        testl $IRQ_LBIT(irq_num), apic_imen ; \
        jne 7f ;	/* masked, don't mask */ \
        orl $IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */ \
        movl IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
        movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
        movl %eax, (%ecx) ;	/* write the index */ \
        movl IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
        orl $IOART_INTMASK, %eax ;	/* set the mask */ \
        movl %eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ;	/* already masked */ \
        APIC_IMASK_UNLOCK ; \
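
/*
 * Rough C equivalent of the register access above (illustrative
 * sketch; the I/O APIC is programmed through an index/window pair,
 * and the field name used here is hypothetical):
 *
 *	volatile u_int32_t *ioapic = int_to_apicintpin[irq].apic_addr;
 *	ioapic[0] = rediridx;			// select register
 *	ioapic[IOAPIC_WINDOW / 4] |= IOART_INTMASK; // rmw via window
 */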

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
        testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
        jz 9f ;	/* edge, don't mask */ \
        MASK_IRQ(irq_num) ; \
9: ; \

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
        cmpl $0,%eax ; \
        jnz 8f ; \
        APIC_IMASK_LOCK ;	/* into critical reg */ \
        testl $IRQ_LBIT(irq_num), apic_imen ; \
        je 7f ;	/* bit clear, not masked */ \
        andl $~IRQ_LBIT(irq_num), apic_imen ;	/* clear mask bit */ \
        movl IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */ \
        movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
        movl %eax,(%ecx) ;	/* write the index */ \
        movl IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
        andl $~IOART_INTMASK,%eax ;	/* clear the mask */ \
        movl %eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ; \
        APIC_IMASK_UNLOCK ; \
8: ; \
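
/*
 * Calling convention note (inferred from the FAST_INTR path below):
 * UNMASK_IRQ expects %eax to hold the return value of the handler
 * call that precedes it; a nonzero value means "leave the source
 * masked" and branches straight to 8f without touching the I/O APIC.
 */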

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt set its fpending bit and
 *   doreti.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its fpending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

#define FAST_INTR(irq_num, vec_name) \
        .text ; \
        SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
        PUSH_FRAME ; \
        FAKE_MCOUNT(13*4(%esp)) ; \
        MASK_LEVEL_IRQ(irq_num) ; \
        movl $0, lapic_eoi ; \
        movl PCPU(curthread),%ebx ; \
        movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
        pushl %eax ; \
        cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
        jl 2f ; \
1: ; \
        /* in critical section, make interrupt pending */ \
        /* set the pending bit and return, leave interrupt masked */ \
        orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
        orl $RQF_INTPEND,PCPU(reqflags) ; \
        jmp 5f ; \
2: ; \
        /* clear pending bit, run handler */ \
        andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
        pushl $irq_num ; \
        call ithread_fast_handler ;	/* returns 0 to unmask */ \
        addl $4, %esp ; \
        UNMASK_IRQ(irq_num) ; \
5: ; \
        MEXITCOUNT ; \
        jmp doreti ; \
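
/*
 * Illustrative C rendering of the fast path above (a sketch, not from
 * the original source; names follow the per-cpu fields the asm uses):
 *
 *	if (curthread->td_pri >= TDPRI_CRIT) {
 *		fpending |= IRQ_LBIT(irq);	// stays masked
 *		reqflags |= RQF_INTPEND;	// doreti replays it later
 *	} else {
 *		fpending &= ~IRQ_LBIT(irq);
 *		if (ithread_fast_handler(frame) == 0)
 *			UNMASK_IRQ(irq);
 *	}
 */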

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 *   running.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */

#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending) \
        .text ; \
        SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
        PUSH_FRAME ; \
        maybe_extra_ipending ; \
; \
        MASK_LEVEL_IRQ(irq_num) ; \
        incl PCPU(cnt) + V_INTR ; \
        movl $0, lapic_eoi ; \
        movl PCPU(curthread),%ebx ; \
        movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
        pushl %eax ;	/* cpl to restore */ \
        cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
        jl 2f ; \
1: ; \
        /* set the pending bit and return, leave the interrupt masked */ \
        orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
        orl $RQF_INTPEND,PCPU(reqflags) ; \
        jmp 5f ; \
2: ; \
        /* set running bit, clear pending bit, run handler */ \
        andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
        sti ; \
        pushl $irq_num ; \
        call sched_ithd ; \
        addl $4,%esp ; \
5: ; \
        MEXITCOUNT ; \
        jmp doreti ; \
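
/*
 * Design note (inferred from the sequence above, not original text):
 * unlike FAST_INTR, the slow path never unmasks here.  A level-
 * triggered source stays masked until the scheduled interrupt thread
 * has actually run the handler, which avoids an interrupt storm in
 * the window between sched_ithd() and handler execution.
 */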

/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real interrupt.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name) \
        .text ; \
        SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
        PUSH_FRAME ; \
        movl $0, lapic_eoi ;	/* End Of Interrupt to APIC */ \
        /*pushl $irq_num ;*/ \
        /*call do_wrongintr ;*/ \
        /*addl $4,%esp ;*/ \
        POP_FRAME ; \
        iret ; \

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iret

/*
 * Handle TLB shootdowns.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xinvltlb
Xinvltlb:
        pushl %eax

        movl %cr3, %eax	/* invalidate the TLB */
        movl %eax, %cr3

        ss	/* stack segment, avoid %ds load */
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */

        popl %eax
        iret
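
/*
 * Background note (architectural fact, not from the original source):
 * writing %cr3 back to itself flushes all non-global TLB entries on
 * the i386, so the reload pair above is a complete local shootdown.
 * The ss override routes the EOI store through the known-good stack
 * segment so the handler never has to load the kernel %ds.
 */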

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushl %ebp
        movl %esp, %ebp
        pushl %eax
        pushl %ecx
        pushl %edx
        pushl %ds	/* save current data segment */
        pushl %fs

        movl $KDSEL, %eax
        mov %ax, %ds	/* use KERNEL data segment */
        movl $KPSEL, %eax
        mov %ax, %fs

        movl $0, lapic_eoi	/* End Of Interrupt to APIC */

        movl PCPU(cpuid), %eax
        imull $PCB_SIZE, %eax
        leal CNAME(stoppcbs)(%eax), %eax
        pushl %eax
        call CNAME(savectx)	/* Save process context */
        addl $4, %esp

        movl PCPU(cpuid), %eax

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        MPLOCKED
        btsl %eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
        andl $~RQF_IPIQ,PCPU(reqflags)
        pushl %eax
        call lwkt_smp_stopped
        popl %eax
        btl %eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
        jnc 1b

        MPLOCKED
        btrl %eax, started_cpus	/* started_cpus &= ~(1<<id) */
        MPLOCKED
        btrl %eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

        test %eax, %eax
        jnz 2f

        movl CNAME(cpustop_restartfunc), %eax
        test %eax, %eax
        jz 2f
        movl $0, CNAME(cpustop_restartfunc)	/* One-shot */

        call *%eax
2:
        popl %fs
        popl %ds	/* restore previous data segment */
        popl %edx
        popl %ecx
        popl %eax
        movl %ebp, %esp
        popl %ebp
        iret

/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xipiq
Xipiq:
        PUSH_FRAME
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */
        FAKE_MCOUNT(13*4(%esp))

        movl PCPU(curthread),%ebx
        cmpl $TDPRI_CRIT,TD_PRI(%ebx)
        jge 1f
        subl $8,%esp	/* make same as interrupt frame */
        incl PCPU(intr_nesting_level)
        addl $TDPRI_CRIT,TD_PRI(%ebx)
        call lwkt_process_ipiq_frame
        subl $TDPRI_CRIT,TD_PRI(%ebx)
        decl PCPU(intr_nesting_level)
        addl $8,%esp
        pushl $0	/* CPL for frame (REMOVED) */
        MEXITCOUNT
        jmp doreti
1:
        orl $RQF_IPIQ,PCPU(reqflags)
        MEXITCOUNT
        POP_FRAME
        iret
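
/*
 * Flow note (inferred from the code above, not original text): when
 * the current thread is already in a critical section the IPI is not
 * processed here; RQF_IPIQ is latched in reqflags for later servicing.
 * Otherwise the handler raises td_pri by TDPRI_CRIT itself and drains
 * the queue synchronously via lwkt_process_ipiq_frame().
 */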

#ifdef APIC_IO

MCOUNT_LABEL(bintr)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)

/* YYY what is this garbage? */

        SLOW_INTR(0,apic_slowintr0,)
        SLOW_INTR(1,apic_slowintr1,)
        SLOW_INTR(2,apic_slowintr2,)
        SLOW_INTR(3,apic_slowintr3,)
        SLOW_INTR(4,apic_slowintr4,)
        SLOW_INTR(5,apic_slowintr5,)
        SLOW_INTR(6,apic_slowintr6,)
        SLOW_INTR(7,apic_slowintr7,)
        SLOW_INTR(8,apic_slowintr8,)
        SLOW_INTR(9,apic_slowintr9,)
        SLOW_INTR(10,apic_slowintr10,)
        SLOW_INTR(11,apic_slowintr11,)
        SLOW_INTR(12,apic_slowintr12,)
        SLOW_INTR(13,apic_slowintr13,)
        SLOW_INTR(14,apic_slowintr14,)
        SLOW_INTR(15,apic_slowintr15,)
        SLOW_INTR(16,apic_slowintr16,)
        SLOW_INTR(17,apic_slowintr17,)
        SLOW_INTR(18,apic_slowintr18,)
        SLOW_INTR(19,apic_slowintr19,)
        SLOW_INTR(20,apic_slowintr20,)
        SLOW_INTR(21,apic_slowintr21,)
        SLOW_INTR(22,apic_slowintr22,)
        SLOW_INTR(23,apic_slowintr23,)

        WRONGINTR(0,apic_wrongintr0)
        WRONGINTR(1,apic_wrongintr1)
        WRONGINTR(2,apic_wrongintr2)
        WRONGINTR(3,apic_wrongintr3)
        WRONGINTR(4,apic_wrongintr4)
        WRONGINTR(5,apic_wrongintr5)
        WRONGINTR(6,apic_wrongintr6)
        WRONGINTR(7,apic_wrongintr7)
        WRONGINTR(8,apic_wrongintr8)
        WRONGINTR(9,apic_wrongintr9)
        WRONGINTR(10,apic_wrongintr10)
        WRONGINTR(11,apic_wrongintr11)
        WRONGINTR(12,apic_wrongintr12)
        WRONGINTR(13,apic_wrongintr13)
        WRONGINTR(14,apic_wrongintr14)
        WRONGINTR(15,apic_wrongintr15)
        WRONGINTR(16,apic_wrongintr16)
        WRONGINTR(17,apic_wrongintr17)
        WRONGINTR(18,apic_wrongintr18)
        WRONGINTR(19,apic_wrongintr19)
        WRONGINTR(20,apic_wrongintr20)
        WRONGINTR(21,apic_wrongintr21)
        WRONGINTR(22,apic_wrongintr22)
        WRONGINTR(23,apic_wrongintr23)
MCOUNT_LABEL(eintr)

#endif

        .data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long 0
started_cpus:
        .long 0

        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
        .long 0

        .globl apic_pin_trigger
apic_pin_trigger:
        .long 0

        .text