/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.38 2008/05/08 01:21:06 dillon Exp $
 */
#include "use_npx.h"
#include "opt_auto_eoi.h"

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/i386/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
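
/*
 * Worked example (illustrative): for IRQ 9, IRQ_LBIT(9) yields the
 * mask 0x200 and REDTBL_IDX(9) yields 0x22, the select index of the
 * low 32 bits of redirection table entry 9 (entries start at
 * register 0x10 and occupy two 32-bit registers each).
 */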

#ifdef SMP
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs ;							\
	pushl	%gs ;							\
	cld ;								\
	mov	$KDSEL,%ax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	mov	%ax,%gs ;						\
	mov	$KPSEL,%ax ;						\
	mov	%ax,%fs ;						\
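
/*
 * For reference, the frame PUSH_FRAME builds looks like this from
 * the final %esp up: %gs %fs %es %ds, the eight pushal registers
 * (edi esi ebp esp ebx edx ecx eax), the three dummy words, then
 * the CPU-pushed %eip %cs %eflags.  That places the original %eip
 * 15 words above %esp, which is why the handlers below use
 * FAKE_MCOUNT(15*4(%esp)).
 */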

#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags type */			\
	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME							\
	popl	%gs ;							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$3*4,%esp ;	/* dummy xflags, trap & error codes */	\

#define POP_DUMMY							\
	addl	$19*4,%esp ;						\

#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;	/* into critical reg */			\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;		/* masked, don't mask */		\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	APIC_IMASK_UNLOCK ;						\
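
/*
 * The sequence above is the standard IOAPIC indirect-register
 * protocol: write the register index to the select register, then
 * read-modify-write the value through the data window
 * (IOAPIC_WINDOW).  In C-like pseudocode (an illustrative sketch,
 * not kernel source):
 *
 *	if ((apic_imen & (1 << irq)) == 0) {
 *		apic_imen |= 1 << irq;
 *		ioapic->ioregsel = REDIRIDX(irq);
 *		ioapic->iowin |= IOART_INTMASK;
 *	}
 */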

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;									\

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	cmpl	$0,%eax ;						\
	jnz	8f ;							\
	APIC_IMASK_LOCK ;	/* into critical reg */			\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;		/* bit clear, not masked */		\
	andl	$~IRQ_LBIT(irq_num), apic_imen ; /* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	APIC_IMASK_UNLOCK ;						\
8: ;									\
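
/*
 * Note that %eax holds the return value of ithread_fast_handler()
 * at UNMASK_IRQ()'s call site below; a non-zero return skips the
 * unmask entirely.  Roughly (illustrative pseudocode only):
 *
 *	if (eax == 0 && (apic_imen & (1 << irq))) {
 *		apic_imen &= ~(1 << irq);
 *		ioapic->ioregsel = REDIRIDX(irq);
 *		ioapic->iowin &= ~IOART_INTMASK;
 *	}
 */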

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
#define FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;							\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jl	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushl	$irq_num ;						\
	pushl	%esp ;			/* pass frame by reference */	\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	addl	$8, %esp ;						\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\
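
/*
 * The fast path above, as C-like pseudocode (an illustrative
 * sketch; td/gd stand for the current thread and per-cpu data):
 *
 *	mask_level_irq(irq);
 *	lapic_eoi = 0;
 *	if (td->td_nest_count || td->td_pri >= TDPRI_CRIT) {
 *		gd->gd_fpending |= 1 << irq;	// defer, leave masked
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		gd->gd_fpending &= ~(1 << irq);
 *		if (ithread_fast_handler(frame) == 0)
 *			unmask_irq(irq);
 *	}
 *	doreti();
 */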

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	maybe_extra_ipending ;						\
	MASK_LEVEL_IRQ(irq_num) ;					\
	incl	PCPU(cnt) + V_INTR ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;		/* cpl to restore */			\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jl	2f ;							\
1: ;									\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	incl	TD_NEST_COUNT(%ebx) ;					\
	sti ;								\
	pushl	$irq_num ;						\
	call	sched_ithd ;						\
	addl	$4,%esp ;						\
	cli ;								\
	decl	TD_NEST_COUNT(%ebx) ;					\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\
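
/*
 * The slow path differs from FAST_INTR mainly in deferring via
 * ipending and in scheduling the ithread rather than calling a
 * handler directly; roughly (illustrative sketch only):
 *
 *	if (td->td_nest_count || td->td_pri >= TDPRI_CRIT) {
 *		gd->gd_ipending |= 1 << irq;
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		gd->gd_ipending &= ~(1 << irq);
 *		++td->td_nest_count;
 *		sti();
 *		sched_ithd(irq);	// interrupts enabled here
 *		cli();
 *		--td->td_nest_count;
 *	}
 *	doreti();	// a level-triggered source stays masked
 */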

/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real interrupt.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$0, lapic_eoi ;	/* End Of Interrupt to APIC */		\
	/*pushl	$irq_num ;*/						\
	/*call	do_wrongintr ;*/					\
	/*addl	$4,%esp ;*/						\
	POP_FRAME ;							\
	iret ;								\

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xinvltlb
Xinvltlb:
	pushl	%eax

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret
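
/*
 * Note: on the i386, reloading %cr3 with its current value flushes
 * all non-global TLB entries, which is all this shootdown needs.
 */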

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(cpuid), %eax

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 */
	MPLOCKED
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushl	%eax
	call	lwkt_smp_stopped
	popl	%eax
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	MPLOCKED
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	MPLOCKED
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
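
/*
 * The stop/restart handshake above, as C-like pseudocode (a sketch
 * only; the atomic_*_bit() names are illustrative):
 *
 *	savectx(&stoppcbs[cpuid]);
 *	atomic_set_bit(&stopped_cpus, cpuid);
 *	while ((started_cpus & (1 << cpuid)) == 0) {
 *		gd->gd_reqflags &= ~RQF_IPIQ;
 *		lwkt_smp_stopped();		// drain IPIQ events
 *	}
 *	atomic_clear_bit(&started_cpus, cpuid);
 *	atomic_clear_bit(&stopped_cpus, cpuid);
 *	if (cpuid == 0 && cpustop_restartfunc != NULL) {
 *		void (*f)(void) = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;	// one-shot
 *		f();
 *	}
 */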

/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	PUSH_FRAME
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))

	movl	PCPU(curthread),%ebx
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)
	jge	1f
	subl	$8,%esp			/* make same as interrupt frame */
	pushl	%esp			/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	lwkt_process_ipiq_frame
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	PCPU(intr_nesting_level)
	addl	$12,%esp
	pushl	$0			/* CPL for frame (REMOVED) */
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	POP_FRAME
	iret
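
/*
 * Roughly, in pseudocode (illustrative sketch): if the current
 * thread is not already in a critical section, enter one, process
 * the queued IPI requests, and exit via doreti; otherwise just flag
 * the work for doreti to pick up later:
 *
 *	if (td->td_pri < TDPRI_CRIT) {
 *		td->td_pri += TDPRI_CRIT;
 *		lwkt_process_ipiq_frame(frame);
 *		td->td_pri -= TDPRI_CRIT;
 *		doreti();
 *	} else {
 *		gd->gd_reqflags |= RQF_IPIQ;	// retry from doreti
 *		iret();
 *	}
 */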

#ifdef APIC_IO

MCOUNT_LABEL(bintr)
	FAST_INTR(0,apic_fastintr0)
	FAST_INTR(1,apic_fastintr1)
	FAST_INTR(2,apic_fastintr2)
	FAST_INTR(3,apic_fastintr3)
	FAST_INTR(4,apic_fastintr4)
	FAST_INTR(5,apic_fastintr5)
	FAST_INTR(6,apic_fastintr6)
	FAST_INTR(7,apic_fastintr7)
	FAST_INTR(8,apic_fastintr8)
	FAST_INTR(9,apic_fastintr9)
	FAST_INTR(10,apic_fastintr10)
	FAST_INTR(11,apic_fastintr11)
	FAST_INTR(12,apic_fastintr12)
	FAST_INTR(13,apic_fastintr13)
	FAST_INTR(14,apic_fastintr14)
	FAST_INTR(15,apic_fastintr15)
	FAST_INTR(16,apic_fastintr16)
	FAST_INTR(17,apic_fastintr17)
	FAST_INTR(18,apic_fastintr18)
	FAST_INTR(19,apic_fastintr19)
	FAST_INTR(20,apic_fastintr20)
	FAST_INTR(21,apic_fastintr21)
	FAST_INTR(22,apic_fastintr22)
	FAST_INTR(23,apic_fastintr23)

	/* YYY what is this garbage? */

	SLOW_INTR(0,apic_slowintr0,)
	SLOW_INTR(1,apic_slowintr1,)
	SLOW_INTR(2,apic_slowintr2,)
	SLOW_INTR(3,apic_slowintr3,)
	SLOW_INTR(4,apic_slowintr4,)
	SLOW_INTR(5,apic_slowintr5,)
	SLOW_INTR(6,apic_slowintr6,)
	SLOW_INTR(7,apic_slowintr7,)
	SLOW_INTR(8,apic_slowintr8,)
	SLOW_INTR(9,apic_slowintr9,)
	SLOW_INTR(10,apic_slowintr10,)
	SLOW_INTR(11,apic_slowintr11,)
	SLOW_INTR(12,apic_slowintr12,)
	SLOW_INTR(13,apic_slowintr13,)
	SLOW_INTR(14,apic_slowintr14,)
	SLOW_INTR(15,apic_slowintr15,)
	SLOW_INTR(16,apic_slowintr16,)
	SLOW_INTR(17,apic_slowintr17,)
	SLOW_INTR(18,apic_slowintr18,)
	SLOW_INTR(19,apic_slowintr19,)
	SLOW_INTR(20,apic_slowintr20,)
	SLOW_INTR(21,apic_slowintr21,)
	SLOW_INTR(22,apic_slowintr22,)
	SLOW_INTR(23,apic_slowintr23,)

	WRONGINTR(0,apic_wrongintr0)
	WRONGINTR(1,apic_wrongintr1)
	WRONGINTR(2,apic_wrongintr2)
	WRONGINTR(3,apic_wrongintr3)
	WRONGINTR(4,apic_wrongintr4)
	WRONGINTR(5,apic_wrongintr5)
	WRONGINTR(6,apic_wrongintr6)
	WRONGINTR(7,apic_wrongintr7)
	WRONGINTR(8,apic_wrongintr8)
	WRONGINTR(9,apic_wrongintr9)
	WRONGINTR(10,apic_wrongintr10)
	WRONGINTR(11,apic_wrongintr11)
	WRONGINTR(12,apic_wrongintr12)
	WRONGINTR(13,apic_wrongintr13)
	WRONGINTR(14,apic_wrongintr14)
	WRONGINTR(15,apic_wrongintr15)
	WRONGINTR(16,apic_wrongintr16)
	WRONGINTR(17,apic_wrongintr17)
	WRONGINTR(18,apic_wrongintr18)
	WRONGINTR(19,apic_wrongintr19)
	WRONGINTR(20,apic_wrongintr20)
	WRONGINTR(21,apic_wrongintr21)
	WRONGINTR(22,apic_wrongintr22)
	WRONGINTR(23,apic_wrongintr23)
MCOUNT_LABEL(eintr)

#endif

	.data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0
started_cpus:
	.long	0

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.long	0

	.globl apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text