/*
 * IO APIC: Get rid of apic_imen
 * [dragonfly.git] sys/platform/pc32/apic/apic_vector.s
 * blob 9bfccea38de51124e8c2e3020e32bdc802f8bd98
 */
1 /*
2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
5 */
7 #include "use_npx.h"
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/isa.h>
18 #include "assym.s"
20 #include "apicreg.h"
21 #include "apic_ipl.h"
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
25 /* convert an absolute IRQ# into a bitmask */
26 #define IRQ_LBIT(irq_num) (1 << (irq_num))
/*
 * Each I/O APIC redirection-table entry occupies two 32-bit registers,
 * and the table starts at register index 0x10 (per the formula below).
 */
28 /* make an index into the IO APIC from the IRQ# */
29 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/*
 * MPLOCKED expands to the x86 "lock" prefix on SMP kernels so the
 * following read-modify-write instruction is atomic across CPUs;
 * on UP kernels it expands to nothing.
 */
31 #ifdef SMP
32 #define MPLOCKED lock ;
33 #else
34 #define MPLOCKED
35 #endif
/*
 * PUSH_FRAME: build a trap frame in the layout doreti expects.
 * Pushes dummy error-code / trap-type / xflags words, all general
 * registers (pushal) and the four data segment registers, clears
 * the direction flag, then reloads %ds/%es/%gs with the kernel
 * data selector (KDSEL) and %fs with KPSEL — %fs is used for the
 * hidden per-cpu (PCPU) accesses elsewhere in this file.
 *
38 * Push an interrupt frame in a format acceptable to doreti, reload
39 * the segment registers for the kernel.
 */
41 #define PUSH_FRAME \
42 pushl $0 ; /* dummy error code */ \
43 pushl $0 ; /* dummy trap type */ \
44 pushl $0 ; /* dummy xflags type */ \
45 pushal ; \
46 pushl %ds ; /* save data and extra segments ... */ \
47 pushl %es ; \
48 pushl %fs ; \
49 pushl %gs ; \
50 cld ; \
51 mov $KDSEL,%ax ; \
52 mov %ax,%ds ; \
53 mov %ax,%es ; \
54 mov %ax,%gs ; \
55 mov $KPSEL,%ax ; \
56 mov %ax,%fs ; \
/*
 * PUSH_DUMMY: fabricate an interrupt-style frame on top of a normal
 * call frame: flags and %cs come from the current context, the eip
 * is copied from the original caller's return address (12(%esp)),
 * followed by dummy error/trap/xflags words.  The final subl leaves
 * 13 uninitialized slots standing in for pushal + 4 segment
 * registers + CPL, matching the PUSH_FRAME layout.
 */
58 #define PUSH_DUMMY \
59 pushfl ; /* phys int frame / flags */ \
60 pushl %cs ; /* phys int frame / cs */ \
61 pushl 12(%esp) ; /* original caller eip */ \
62 pushl $0 ; /* dummy error code */ \
63 pushl $0 ; /* dummy trap type */ \
64 pushl $0 ; /* dummy xflags type */ \
65 subl $13*4,%esp ; /* pushal + 4 seg regs (dummy) + CPL */ \
/*
 * POP_FRAME: inverse of PUSH_FRAME — restore the four segment
 * registers and general registers, then discard the three dummy
 * words (xflags, trap type, error code).
 *
68 * Warning: POP_FRAME can only be used if there is no chance of a
69 * segment register being changed (e.g. by procfs), which is why syscalls
70 * have to use doreti.
 */
72 #define POP_FRAME \
73 popl %gs ; \
74 popl %fs ; \
75 popl %es ; \
76 popl %ds ; \
77 popal ; \
78 addl $3*4,%esp ; /* dummy xflags, trap & error codes */ \
/*
 * POP_DUMMY: discard the entire 19-word frame built by PUSH_DUMMY
 * (3 pushed words + 3 dummy words + 13 reserved slots).
 */
80 #define POP_DUMMY \
81 addl $19*4,%esp ; \
/*
 * Per-IRQ accessors into the int_to_apicintpin[] table: each entry
 * is IOAPIC_IM_SIZE bytes and holds the I/O APIC base address
 * (IOAPIC_IM_ADDR), the redirection-entry index (IOAPIC_IM_ENTIDX)
 * and software flags (IOAPIC_IM_FLAGS) for that IRQ.
 */
83 #define IOAPICADDR(irq_num) \
84 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ADDR
85 #define REDIRIDX(irq_num) \
86 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ENTIDX
87 #define IOAPICFLAGS(irq_num) \
88 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_FLAGS
/*
 * MASK_IRQ: under the APIC interrupt-mask spinlock, set the software
 * MASKED flag for this IRQ (skipping everything if it is already
 * set) and then set the hardware mask bit: write the redirection
 * index into the I/O APIC index register and OR IOART_INTMASK into
 * the window register.  Clobbers %eax and %ecx and flags.
 */
90 #define MASK_IRQ(irq_num) \
91 APIC_IMASK_LOCK ; /* into critical reg */ \
92 testl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
93 jne 7f ; /* masked, don't mask */ \
94 orl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
95 /* set the mask bit */ \
96 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
97 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
98 movl %eax, (%ecx) ; /* write the index */ \
99 orl $IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* set the mask */ \
100 7: ; /* already masked */ \
101 APIC_IMASK_UNLOCK ; \
/*
 * MASK_LEVEL_IRQ: mask the IRQ only if it is level-triggered
 * (edge-triggered sources fall through unmasked).
 *
104 * Test to see whether we are handling an edge or level triggered INT.
105 * Level-triggered INTs must still be masked as we don't clear the source,
106 * and the EOI cycle would cause redundant INTs to occur.
 */
108 #define MASK_LEVEL_IRQ(irq_num) \
109 testl $IOAPIC_IM_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
110 jz 9f ; /* edge, don't mask */ \
111 MASK_IRQ(irq_num) ; \
112 9: ; \
/*
 * UNMASK_IRQ: if %eax is non-zero on entry the unmask is skipped
 * entirely (used with the return value of ithread_fast_handler,
 * where non-zero means "leave masked").  Otherwise, under the APIC
 * interrupt-mask spinlock, clear the software MASKED flag (if set)
 * and clear the hardware mask bit in the I/O APIC redirection entry.
 * Clobbers %eax and %ecx and flags.
 *
115 * Test to see if the source is currently masked, clear if so.
 */
117 #define UNMASK_IRQ(irq_num) \
118 cmpl $0,%eax ; \
119 jnz 8f ; \
120 APIC_IMASK_LOCK ; /* into critical reg */ \
121 testl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
124 /* clear mask bit */ \
125 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
126 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
127 movl %eax,(%ecx) ; /* write the index */ \
128 andl $~IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* clear the mask */ \
129 7: ; \
130 APIC_IMASK_UNLOCK ; \
131 8: ; \
133 #ifdef APIC_IO
/*
 * FAST_INTR: template for the "fast" per-IRQ interrupt vectors.
 * Builds a doreti trap frame, masks level-triggered sources, sends
 * the lapic EOI, then either (a) records the IRQ in the per-cpu
 * fpending mask and defers to doreti if the current thread is in a
 * critical section (TD_PRI >= TDPRI_CRIT) or has a non-zero nest
 * count, or (b) clears the pending bit, calls ithread_fast_handler
 * with the frame, and unmasks the source iff the handler returned 0.
 *
136 * Fast interrupt call handlers run in the following sequence:
 *
138 * - Push the trap frame required by doreti
139 * - Mask the interrupt and reenable its source
140 * - If we cannot take the interrupt set its fpending bit and
141 * doreti. Note that we cannot mess with mp_lock at all
142 * if we entered from a critical section!
143 * - If we can take the interrupt clear its fpending bit,
144 * call the handler, then unmask and doreti.
 *
146 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
149 #define FAST_INTR(irq_num, vec_name) \
150 .text ; \
151 SUPERALIGN_TEXT ; \
152 IDTVEC(vec_name) ; \
153 PUSH_FRAME ; \
154 FAKE_MCOUNT(15*4(%esp)) ; \
155 MASK_LEVEL_IRQ(irq_num) ; \
156 movl $0, lapic_eoi ; \
157 movl PCPU(curthread),%ebx ; \
158 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
159 pushl %eax ; \
160 testl $-1,TD_NEST_COUNT(%ebx) ; \
161 jne 1f ; \
162 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
163 jl 2f ; \
164 1: ; \
165 /* in critical section, make interrupt pending */ \
166 /* set the pending bit and return, leave interrupt masked */ \
167 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
168 orl $RQF_INTPEND,PCPU(reqflags) ; \
169 jmp 5f ; \
170 2: ; \
171 /* clear pending bit, run handler */ \
172 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
173 pushl $irq_num ; \
174 pushl %esp ; /* pass frame by reference */ \
175 call ithread_fast_handler ; /* returns 0 to unmask */ \
176 addl $8, %esp ; \
177 UNMASK_IRQ(irq_num) ; \
178 5: ; \
179 MEXITCOUNT ; \
180 jmp doreti ; \
/*
 * SLOW_INTR: template for the "slow" per-IRQ interrupt vectors.
 * Like FAST_INTR it builds a doreti frame, masks level-triggered
 * sources and EOIs; but instead of running a handler inline it
 * either marks the IRQ in the per-cpu ipending mask (when in a
 * critical section / nested) or re-enables interrupts and calls
 * sched_ithd to schedule the interrupt thread.  The source is left
 * masked in both paths.  TD_NEST_COUNT is bumped around the
 * sched_ithd call to prevent unbounded interrupt stacking.
 *
183 * Slow interrupt call handlers run in the following sequence:
 *
185 * - Push the trap frame required by doreti.
186 * - Mask the interrupt and reenable its source.
187 * - If we cannot take the interrupt set its ipending bit and
188 * doreti. In addition to checking for a critical section
189 * and cpl mask we also check to see if the thread is still
190 * running. Note that we cannot mess with mp_lock at all
191 * if we entered from a critical section!
192 * - If we can take the interrupt clear its ipending bit
193 * and schedule the thread. Leave interrupts masked and doreti.
 *
195 * Note that calls to sched_ithd() are made with interrupts enabled
196 * and outside a critical section. YYY sched_ithd may preempt us
197 * synchronously (fix interrupt stacking).
 *
199 * YYY can cache gd base pointer instead of using hidden %fs
200 * prefixes.
 */
203 #define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending) \
204 .text ; \
205 SUPERALIGN_TEXT ; \
206 IDTVEC(vec_name) ; \
207 PUSH_FRAME ; \
208 maybe_extra_ipending ; \
210 MASK_LEVEL_IRQ(irq_num) ; \
211 incl PCPU(cnt) + V_INTR ; \
212 movl $0, lapic_eoi ; \
213 movl PCPU(curthread),%ebx ; \
214 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
215 pushl %eax ; /* cpl do restore */ \
216 testl $-1,TD_NEST_COUNT(%ebx) ; \
217 jne 1f ; \
218 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
219 jl 2f ; \
220 1: ; \
221 /* set the pending bit and return, leave the interrupt masked */ \
222 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
223 orl $RQF_INTPEND,PCPU(reqflags) ; \
224 jmp 5f ; \
225 2: ; \
226 /* set running bit, clear pending bit, run handler */ \
227 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
228 incl TD_NEST_COUNT(%ebx) ; \
229 sti ; \
230 pushl $irq_num ; \
231 call sched_ithd ; \
232 addl $4,%esp ; \
233 cli ; \
234 decl TD_NEST_COUNT(%ebx) ; \
235 5: ; \
236 MEXITCOUNT ; \
237 jmp doreti ; \
/*
 * WRONGINTR: stub vector for APIC slots that should never fire.
 * It only EOIs the local APIC and returns; the do_wrongintr
 * diagnostic call is commented out (see XXX below).
 *
240 * Wrong interrupt call handlers. We program these into APIC vectors
241 * that should otherwise never occur. For example, we program the SLOW
242 * vector for irq N with this when we program the FAST vector with the
243 * real interrupt.
 *
245 * XXX for now all we can do is EOI it. We can't call do_wrongintr
246 * (yet) because we could be in a critical section.
 */
248 #define WRONGINTR(irq_num,vec_name) \
249 .text ; \
250 SUPERALIGN_TEXT ; \
251 IDTVEC(vec_name) ; \
252 PUSH_FRAME ; \
253 movl $0, lapic_eoi ; /* End Of Interrupt to APIC */ \
254 /*pushl $irq_num ;*/ \
255 /*call do_wrongintr ;*/ \
256 /*addl $4,%esp ;*/ \
257 POP_FRAME ; \
258 iret ; \
260 #endif
/*
263 * Handle "spurious INTerrupts".
264 * Notes:
265 * This is different than the "spurious INTerrupt" generated by an
266 * 8259 PIC for missing INTs. See the APIC documentation for details.
267 * This routine should NOT do an 'EOI' cycle.
 */
269 .text
270 SUPERALIGN_TEXT
271 .globl Xspuriousint
272 Xspuriousint:
274 /* No EOI cycle used here */
276 iret
/*
280 * Handle TLB shootdowns.
 *
 * Reloading %cr3 with its current value flushes the non-global TLB
 * entries on i386.  The EOI store uses an explicit %ss override
 * because %ds has not been reloaded with a kernel selector here.
 * Only %eax is saved/restored — nothing else is touched.
 */
282 .text
283 SUPERALIGN_TEXT
284 .globl Xinvltlb
285 Xinvltlb:
286 pushl %eax
288 movl %cr3, %eax /* invalidate the TLB */
289 movl %eax, %cr3
291 ss /* stack segment, avoid %ds load */
292 movl $0, lapic_eoi /* End Of Interrupt to APIC */
294 popl %eax
295 iret
/*
299 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
301 * - Signals its receipt.
302 * - Waits for permission to restart.
303 * - Processing pending IPIQ events while waiting.
304 * - Signals its restart.
 *
 * Saves this cpu's context into stoppcbs[cpuid] via savectx, sets
 * its bit in stopped_cpus, then spins calling lwkt_smp_stopped
 * until its bit appears in started_cpus, finally clearing both bits.
 *
 * NOTE(review): the `jnc 1b` and `jnz 2f` below reference numeric
 * labels (`1:`, `2:`) that do not appear in this extraction of the
 * file — they appear to have been lost when the source was dumped;
 * confirm against the original repository before reassembling.
 */
307 .text
308 SUPERALIGN_TEXT
309 .globl Xcpustop
310 Xcpustop:
311 pushl %ebp
312 movl %esp, %ebp
313 pushl %eax
314 pushl %ecx
315 pushl %edx
316 pushl %ds /* save current data segment */
317 pushl %fs
319 movl $KDSEL, %eax
320 mov %ax, %ds /* use KERNEL data segment */
321 movl $KPSEL, %eax
322 mov %ax, %fs
324 movl $0, lapic_eoi /* End Of Interrupt to APIC */
326 movl PCPU(cpuid), %eax
327 imull $PCB_SIZE, %eax
328 leal CNAME(stoppcbs)(%eax), %eax
329 pushl %eax
330 call CNAME(savectx) /* Save process context */
331 addl $4, %esp
334 movl PCPU(cpuid), %eax
/*
337 * Indicate that we have stopped and loop waiting for permission
338 * to start again. We must still process IPI events while in a
339 * stopped state.
 */
341 MPLOCKED
342 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
344 andl $~RQF_IPIQ,PCPU(reqflags)
345 pushl %eax
346 call lwkt_smp_stopped
347 popl %eax
348 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
349 jnc 1b
351 MPLOCKED
352 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
353 MPLOCKED
354 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
356 test %eax, %eax
357 jnz 2f
/* cpu 0 only: run the one-shot restart callback, if any */
359 movl CNAME(cpustop_restartfunc), %eax
360 test %eax, %eax
361 jz 2f
362 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
364 call *%eax
366 popl %fs
367 popl %ds /* restore previous data segment */
368 popl %edx
369 popl %ecx
370 popl %eax
371 movl %ebp, %esp
372 popl %ebp
373 iret
/*
376 * For now just have one ipiq IPI, but what we really want is
377 * to have one for each source cpu so the APICs don't get stalled
378 * backlogging the requests.
 *
 * Xipiq: inter-processor IPI-queue vector.  If the current thread
 * is not in a critical section, enter one (bump TD_PRI by
 * TDPRI_CRIT), process the queue via lwkt_process_ipiq_frame with
 * the frame passed by reference, and exit via doreti.  Otherwise
 * just set RQF_IPIQ in reqflags and iret.
 *
 * NOTE(review): `jge 1f` below targets a `1:` label that does not
 * appear in this extraction (it would precede the RQF_IPIQ path);
 * it appears lost in the dump — confirm against the original.
 */
380 .text
381 SUPERALIGN_TEXT
382 .globl Xipiq
383 Xipiq:
384 PUSH_FRAME
385 movl $0, lapic_eoi /* End Of Interrupt to APIC */
386 FAKE_MCOUNT(15*4(%esp))
388 incl PCPU(cnt) + V_IPI
389 movl PCPU(curthread),%ebx
390 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
391 jge 1f
392 subl $8,%esp /* make same as interrupt frame */
393 pushl %esp /* pass frame by reference */
394 incl PCPU(intr_nesting_level)
395 addl $TDPRI_CRIT,TD_PRI(%ebx)
396 call lwkt_process_ipiq_frame
397 subl $TDPRI_CRIT,TD_PRI(%ebx)
398 decl PCPU(intr_nesting_level)
399 addl $12,%esp
400 pushl $0 /* CPL for frame (REMOVED) */
401 MEXITCOUNT
402 jmp doreti
/* critical-section path: defer by flagging the IPI queue pending */
404 orl $RQF_IPIQ,PCPU(reqflags)
405 MEXITCOUNT
406 POP_FRAME
407 iret
/*
 * Xtimer: local APIC timer vector.  Mirrors Xipiq: if the thread is
 * not in a critical section and has no nest count, enter a critical
 * section and call lapic_timer_process_frame with the frame passed
 * by reference; otherwise set RQF_TIMER in reqflags and iret.
 *
 * NOTE(review): `jge 1f`/`jne 1f` below target a `1:` label that
 * does not appear in this extraction (it would precede the
 * RQF_TIMER path); it appears lost in the dump — confirm against
 * the original.
 */
409 .text
410 SUPERALIGN_TEXT
411 .globl Xtimer
412 Xtimer:
413 PUSH_FRAME
414 movl $0, lapic_eoi /* End Of Interrupt to APIC */
415 FAKE_MCOUNT(15*4(%esp))
417 incl PCPU(cnt) + V_TIMER
418 movl PCPU(curthread),%ebx
419 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
420 jge 1f
421 testl $-1,TD_NEST_COUNT(%ebx)
422 jne 1f
423 subl $8,%esp /* make same as interrupt frame */
424 pushl %esp /* pass frame by reference */
425 incl PCPU(intr_nesting_level)
426 addl $TDPRI_CRIT,TD_PRI(%ebx)
427 call lapic_timer_process_frame
428 subl $TDPRI_CRIT,TD_PRI(%ebx)
429 decl PCPU(intr_nesting_level)
430 addl $12,%esp
431 pushl $0 /* CPL for frame (REMOVED) */
432 MEXITCOUNT
433 jmp doreti
/* critical-section path: defer by flagging the timer pending */
435 orl $RQF_TIMER,PCPU(reqflags)
436 MEXITCOUNT
437 POP_FRAME
438 iret
440 #ifdef APIC_IO
/*
 * Instantiate the fast, slow and wrong-interrupt vectors for all
 * 24 I/O APIC interrupt pins (IRQ 0-23).  The MCOUNT_LABELs bracket
 * the generated code for the profiler.
 */
442 MCOUNT_LABEL(bintr)
443 FAST_INTR(0,apic_fastintr0)
444 FAST_INTR(1,apic_fastintr1)
445 FAST_INTR(2,apic_fastintr2)
446 FAST_INTR(3,apic_fastintr3)
447 FAST_INTR(4,apic_fastintr4)
448 FAST_INTR(5,apic_fastintr5)
449 FAST_INTR(6,apic_fastintr6)
450 FAST_INTR(7,apic_fastintr7)
451 FAST_INTR(8,apic_fastintr8)
452 FAST_INTR(9,apic_fastintr9)
453 FAST_INTR(10,apic_fastintr10)
454 FAST_INTR(11,apic_fastintr11)
455 FAST_INTR(12,apic_fastintr12)
456 FAST_INTR(13,apic_fastintr13)
457 FAST_INTR(14,apic_fastintr14)
458 FAST_INTR(15,apic_fastintr15)
459 FAST_INTR(16,apic_fastintr16)
460 FAST_INTR(17,apic_fastintr17)
461 FAST_INTR(18,apic_fastintr18)
462 FAST_INTR(19,apic_fastintr19)
463 FAST_INTR(20,apic_fastintr20)
464 FAST_INTR(21,apic_fastintr21)
465 FAST_INTR(22,apic_fastintr22)
466 FAST_INTR(23,apic_fastintr23)
468 /* YYY what is this garbage? */
/* all slow vectors are instantiated with an empty maybe_extra_ipending arg */
470 SLOW_INTR(0,apic_slowintr0,)
471 SLOW_INTR(1,apic_slowintr1,)
472 SLOW_INTR(2,apic_slowintr2,)
473 SLOW_INTR(3,apic_slowintr3,)
474 SLOW_INTR(4,apic_slowintr4,)
475 SLOW_INTR(5,apic_slowintr5,)
476 SLOW_INTR(6,apic_slowintr6,)
477 SLOW_INTR(7,apic_slowintr7,)
478 SLOW_INTR(8,apic_slowintr8,)
479 SLOW_INTR(9,apic_slowintr9,)
480 SLOW_INTR(10,apic_slowintr10,)
481 SLOW_INTR(11,apic_slowintr11,)
482 SLOW_INTR(12,apic_slowintr12,)
483 SLOW_INTR(13,apic_slowintr13,)
484 SLOW_INTR(14,apic_slowintr14,)
485 SLOW_INTR(15,apic_slowintr15,)
486 SLOW_INTR(16,apic_slowintr16,)
487 SLOW_INTR(17,apic_slowintr17,)
488 SLOW_INTR(18,apic_slowintr18,)
489 SLOW_INTR(19,apic_slowintr19,)
490 SLOW_INTR(20,apic_slowintr20,)
491 SLOW_INTR(21,apic_slowintr21,)
492 SLOW_INTR(22,apic_slowintr22,)
493 SLOW_INTR(23,apic_slowintr23,)
495 WRONGINTR(0,apic_wrongintr0)
496 WRONGINTR(1,apic_wrongintr1)
497 WRONGINTR(2,apic_wrongintr2)
498 WRONGINTR(3,apic_wrongintr3)
499 WRONGINTR(4,apic_wrongintr4)
500 WRONGINTR(5,apic_wrongintr5)
501 WRONGINTR(6,apic_wrongintr6)
502 WRONGINTR(7,apic_wrongintr7)
503 WRONGINTR(8,apic_wrongintr8)
504 WRONGINTR(9,apic_wrongintr9)
505 WRONGINTR(10,apic_wrongintr10)
506 WRONGINTR(11,apic_wrongintr11)
507 WRONGINTR(12,apic_wrongintr12)
508 WRONGINTR(13,apic_wrongintr13)
509 WRONGINTR(14,apic_wrongintr14)
510 WRONGINTR(15,apic_wrongintr15)
511 WRONGINTR(16,apic_wrongintr16)
512 WRONGINTR(17,apic_wrongintr17)
513 WRONGINTR(18,apic_wrongintr18)
514 WRONGINTR(19,apic_wrongintr19)
515 WRONGINTR(20,apic_wrongintr20)
516 WRONGINTR(21,apic_wrongintr21)
517 WRONGINTR(22,apic_wrongintr22)
518 WRONGINTR(23,apic_wrongintr23)
519 MCOUNT_LABEL(eintr)
521 #endif
523 .data
/*
 * CPU bitmasks manipulated with btsl/btrl from Xcpustop: one bit
 * per cpuid.  Being 32-bit words, they cover at most 32 CPUs.
 */
525 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
526 .globl stopped_cpus, started_cpus
527 stopped_cpus:
528 .long 0
529 started_cpus:
530 .long 0
/*
 * One-shot function pointer run by cpu 0 in Xcpustop after restart;
 * cleared before it is called.
 */
532 .globl CNAME(cpustop_restartfunc)
533 CNAME(cpustop_restartfunc):
534 .long 0
536 .text