kernel - Fix excessive ipiq recursion (3)
[dragonfly.git] / sys / platform / pc64 / apic / apic_vector.s
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 */

#if 0
#include "opt_auto_eoi.h"
#endif

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include "assym.s"

#include "apicreg.h"
#include <machine_base/apic/ioapic_ipl.h>
#include <machine/intr_machdep.h>

#ifdef foo
/* convert an absolute IRQ# into bitmask */
#define IRQ_LBIT(irq_num)	(1UL << (irq_num & 0x3f))
#endif

#define IRQ_SBITS(irq_num)	((irq_num) & 0x3f)

/* convert an absolute IRQ# into gd_ipending index */
#define IRQ_LIDX(irq_num)	((irq_num) >> 6)
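
/*
 * Pending IRQs are tracked in gd_ipending[] as 64-bit words: IRQ_LIDX()
 * selects the word and IRQ_SBITS() the bit shift within it (e.g. IRQ 70
 * lands in word 1, bit 6).
 */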

#define MPLOCKED	lock ;

#define APIC_PUSH_FRAME \
	PUSH_FRAME ;			/* 15 regs + space for 5 extras */ \
	movq $0,TF_XFLAGS(%rsp) ; \
	movq $0,TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_FLAGS(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	cld ; \

/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME \
	POP_FRAME ; \

#define IOAPICADDR(irq_num) \
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_ADDR
#define REDIRIDX(irq_num) \
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_IDX
#define IOAPICFLAGS(irq_num) \
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_FLAGS
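
/*
 * Each accessor above resolves to one field of the ioapic_irqs[] entry for
 * the given IRQ: the I/O APIC register address, the redirection-entry index,
 * and the per-IRQ software flags respectively.
 */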

#define MASK_IRQ(irq_num) \
	IOAPIC_IMASK_LOCK ;			/* into critical reg */ \
	testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
	jne 7f ;				/* masked, don't mask */ \
	orl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
						/* set the mask bit */ \
	movq IOAPICADDR(irq_num), %rcx ;	/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;		/* get the index */ \
	movl %eax, (%rcx) ;			/* write the index */ \
	orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ; /* set the mask */ \
7: ;						/* already masked */ \
	IOAPIC_IMASK_UNLOCK ; \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
	testl $IOAPIC_IRQI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
	jz 9f ;					/* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \
9: ; \

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
	cmpl $0,%eax ; \
	jnz 8f ; \
	IOAPIC_IMASK_LOCK ;			/* into critical reg */ \
	testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
	je 7f ;					/* bit clear, not masked */ \
	andl $~IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
						/* clear mask bit */ \
	movq IOAPICADDR(irq_num),%rcx ;		/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;		/* get the index */ \
	movl %eax,(%rcx) ;			/* write the index */ \
	andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ; /* clear the mask */ \
7: ; \
	IOAPIC_IMASK_UNLOCK ; \
8: ; \
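
/*
 * UNMASK_IRQ() is entered with %eax holding the return value of
 * ithread_fast_handler(); a non-zero value skips the unmask and leaves
 * the source masked.
 */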

/*
 * Interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.
 *	- If we can take the interrupt clear its ipending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
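/*
 * Roughly equivalent C for the dispatch decision made below (illustrative
 * sketch only, not compiled):
 *
 *	if (td->td_nest_count || td->td_critcount) {
 *		gd->gd_ipending[IRQ_LIDX(irq)] |= 1UL << IRQ_SBITS(irq);
 *		gd->gd_reqflags |= RQF_INTPEND;	// replayed later by doreti
 *	} else {
 *		gd->gd_ipending[IRQ_LIDX(irq)] &= ~(1UL << IRQ_SBITS(irq));
 *		if (ithread_fast_handler(frame) == 0)
 *			UNMASK_IRQ(irq);
 *	}
 */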
#define INTR_HANDLER(irq_num) \
	.text ; \
	SUPERALIGN_TEXT ; \
	IDTVEC(ioapic_intr##irq_num) ; \
	APIC_PUSH_FRAME ; \
	FAKE_MCOUNT(TF_RIP(%rsp)) ; \
	MASK_LEVEL_IRQ(irq_num) ; \
	movq lapic, %rax ; \
	movl $0, LA_EOI(%rax) ; \
	movq PCPU(curthread),%rbx ; \
	testl $-1,TD_NEST_COUNT(%rbx) ; \
	jne 1f ; \
	testl $-1,TD_CRITCOUNT(%rbx) ; \
	je 2f ; \
1: ; \
	/* in critical section, make interrupt pending */ \
	/* set the pending bit and return, leave interrupt masked */ \
	movq $1,%rcx ; \
	shlq $IRQ_SBITS(irq_num),%rcx ; \
	movq $IRQ_LIDX(irq_num),%rdx ; \
	orq %rcx,PCPU_E8(ipending,%rdx) ; \
	orl $RQF_INTPEND,PCPU(reqflags) ; \
	jmp 5f ; \
2: ; \
	/* clear pending bit, run handler */ \
	movq $1,%rcx ; \
	shlq $IRQ_SBITS(irq_num),%rcx ; \
	notq %rcx ; \
	movq $IRQ_LIDX(irq_num),%rdx ; \
	andq %rcx,PCPU_E8(ipending,%rdx) ; \
	pushq $irq_num ;			/* trapframe -> intrframe */ \
	movq %rsp, %rdi ;			/* pass frame by reference */ \
	incl TD_CRITCOUNT(%rbx) ; \
	sti ; \
	call ithread_fast_handler ;		/* returns 0 to unmask */ \
	decl TD_CRITCOUNT(%rbx) ; \
	addq $8, %rsp ;				/* intrframe -> trapframe */ \
	UNMASK_IRQ(irq_num) ; \
5: ; \
	MEXITCOUNT ; \
	jmp doreti ; \

/*
 * Handle "spurious INTerrupts".
 *
 * NOTE: This is different than the "spurious INTerrupt" generated by an
 *	 8259 PIC for missing INTs.  See the APIC documentation for details.
 *	 This routine should NOT do an 'EOI' cycle.
 *
 * NOTE: Even though we don't do anything here we must still swapgs if
 *	 coming from a user frame in case the iretq faults... just use
 *	 the nominal APIC_PUSH_FRAME sequence to get it done.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:
	APIC_PUSH_FRAME
	/* No EOI cycle used here */
	FAKE_MCOUNT(TF_RIP(%rsp))
	MEXITCOUNT
	APIC_POP_FRAME
	jmp doreti_iret

/*
 * Handle TLB shootdowns.
 *
 * NOTE: interrupts are left disabled.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xinvltlb
Xinvltlb:
	APIC_PUSH_FRAME
	movq lapic, %rax
	movl $0, LA_EOI(%rax)			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))
	incl PCPU(cnt) + V_IPI
	subq $8,%rsp				/* make same as interrupt frame */
	movq %rsp,%rdi				/* pass frame by reference */
	call smp_inval_intr
	addq $8,%rsp				/* turn into trapframe */
	MEXITCOUNT
	/*APIC_POP_FRAME*/
	jmp doreti				/* doreti b/c intrs enabled */

/*
 * Handle sniffs - sniff %rip and %rsp.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xsniff
Xsniff:
	APIC_PUSH_FRAME
	movq lapic, %rax
	movl $0, LA_EOI(%rax)			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))
	incl PCPU(cnt) + V_IPI
	movq TF_RIP(%rsp),%rax
	movq %rax,PCPU(sample_pc)
	movq TF_RSP(%rsp),%rax
	movq %rax,PCPU(sample_sp)
	MEXITCOUNT
	APIC_POP_FRAME
	jmp doreti_iret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - We cannot call doreti
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	APIC_PUSH_FRAME
	movq lapic, %rax
	movl $0, LA_EOI(%rax)			/* End Of Interrupt to APIC */

	movl PCPU(cpuid), %eax
	imull $PCB_SIZE, %eax
	leaq CNAME(stoppcbs), %rdi
	addq %rax, %rdi
	call CNAME(savectx)			/* Save process context */

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 *
	 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
	 * (e.g. Xtimer, Xinvltlb).
	 */
#if CPUMASK_ELEMENTS != 4
#error "assembly incompatible with cpumask_t"
#endif
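	/*
	 * cpumask_t is four 64-bit words here (up to 256 cpus), so each
	 * mask update below touches all four quadwords of stopped_cpus /
	 * started_cpus with locked read-modify-write ops.
	 */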
	movq PCPU(cpumask)+0,%rax		/* stopped_cpus |= 1 << cpuid */
	MPLOCKED orq %rax, stopped_cpus+0
	movq PCPU(cpumask)+8,%rax
	MPLOCKED orq %rax, stopped_cpus+8
	movq PCPU(cpumask)+16,%rax
	MPLOCKED orq %rax, stopped_cpus+16
	movq PCPU(cpumask)+24,%rax
	MPLOCKED orq %rax, stopped_cpus+24

	movq PCPU(curthread),%rbx
	incl PCPU(intr_nesting_level)
	incl TD_CRITCOUNT(%rbx)

1:
	andl $~RQF_IPIQ,PCPU(reqflags)
	call lwkt_smp_stopped
	pause

	subq %rdi,%rdi
	movq started_cpus+0,%rax		/* while (!(started_cpus & (1<<id))) */
	andq PCPU(cpumask)+0,%rax
	orq %rax,%rdi
	movq started_cpus+8,%rax
	andq PCPU(cpumask)+8,%rax
	orq %rax,%rdi
	movq started_cpus+16,%rax
	andq PCPU(cpumask)+16,%rax
	orq %rax,%rdi
	movq started_cpus+24,%rax
	andq PCPU(cpumask)+24,%rax
	orq %rax,%rdi
	testq %rdi,%rdi
	jz 1b

	movq PCPU(other_cpus)+0,%rax		/* started_cpus &= ~(1 << cpuid) */
	MPLOCKED andq %rax, started_cpus+0
	movq PCPU(other_cpus)+8,%rax
	MPLOCKED andq %rax, started_cpus+8
	movq PCPU(other_cpus)+16,%rax
	MPLOCKED andq %rax, started_cpus+16
	movq PCPU(other_cpus)+24,%rax
	MPLOCKED andq %rax, started_cpus+24

	movq PCPU(other_cpus)+0,%rax		/* stopped_cpus &= ~(1 << cpuid) */
	MPLOCKED andq %rax, stopped_cpus+0
	movq PCPU(other_cpus)+8,%rax
	MPLOCKED andq %rax, stopped_cpus+8
	movq PCPU(other_cpus)+16,%rax
	MPLOCKED andq %rax, stopped_cpus+16
	movq PCPU(other_cpus)+24,%rax
	MPLOCKED andq %rax, stopped_cpus+24

	cmpl $0,PCPU(cpuid)
	jnz 2f

	movq CNAME(cpustop_restartfunc), %rax
	testq %rax, %rax
	jz 2f
	movq $0, CNAME(cpustop_restartfunc)	/* One-shot */

	call *%rax
2:
	decl TD_CRITCOUNT(%rbx)
	decl PCPU(intr_nesting_level)
	MEXITCOUNT
	/*APIC_POP_FRAME*/
	jmp doreti

/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 */
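/*
 * Xipiq: cross-cpu IPI queue interrupt.  If the current thread is not in
 * a critical section the IPI queues are drained immediately via
 * lwkt_process_ipiq_frame(); otherwise RQF_IPIQ is set and the interrupt
 * simply returns, leaving the queues to be processed later.
 */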
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	APIC_PUSH_FRAME
	movq lapic, %rax
	movl $0, LA_EOI(%rax)			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))

	incl PCPU(cnt) + V_IPI
	movq PCPU(curthread),%rbx
	testl $-1,TD_CRITCOUNT(%rbx)
	jne 1f
	subq $8,%rsp				/* make same as interrupt frame */
	movq %rsp,%rdi				/* pass frame by reference */
	incl PCPU(intr_nesting_level)
	incl TD_CRITCOUNT(%rbx)
	subq %rax,%rax
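	/*
	 * Atomically swap the zeroed %eax into gd_npoll before draining
	 * the queues; per the inline note below this allows another Xipiq
	 * to be sent to us while we are still processing.
	 */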
	xchgl %eax,PCPU(npoll)			/* (atomic op) allow another Xipi */
	call lwkt_process_ipiq_frame
	decl TD_CRITCOUNT(%rbx)
	decl PCPU(intr_nesting_level)
	addq $8,%rsp				/* turn into trapframe */
	MEXITCOUNT
	jmp doreti
1:
	orl $RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	jmp doreti_iret
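
/*
 * Xtimer: per-cpu LAPIC timer interrupt.  pcpu_timer_always() runs
 * unconditionally; the remaining processing is deferred via RQF_TIMER
 * when the thread is in a critical section or has a non-zero nest count.
 */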
	.text
	SUPERALIGN_TEXT
	.globl Xtimer
Xtimer:
	APIC_PUSH_FRAME
	movq lapic, %rax
	movl $0, LA_EOI(%rax)			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))

	subq $8,%rsp				/* make same as interrupt frame */
	movq %rsp,%rdi				/* pass frame by reference */
	call pcpu_timer_always
	addq $8,%rsp				/* turn into trapframe */

	incl PCPU(cnt) + V_TIMER
	movq TF_RIP(%rsp),%rbx			/* sample addr before checking crit */
	movq %rbx,PCPU(sample_pc)
	movq PCPU(curthread),%rbx
	testl $-1,TD_CRITCOUNT(%rbx)
	jne 1f
	testl $-1,TD_NEST_COUNT(%rbx)
	jne 1f
	subq $8,%rsp				/* make same as interrupt frame */
	movq %rsp,%rdi				/* pass frame by reference */
	incl PCPU(intr_nesting_level)
	incl TD_CRITCOUNT(%rbx)
	call pcpu_timer_process_frame
	decl TD_CRITCOUNT(%rbx)
	decl PCPU(intr_nesting_level)
	addq $8,%rsp				/* turn into trapframe */
	MEXITCOUNT
	jmp doreti
1:
	orl $RQF_TIMER,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	jmp doreti_iret

	MCOUNT_LABEL(bintr)
	INTR_HANDLER(0)
	INTR_HANDLER(1)
	INTR_HANDLER(2)
	INTR_HANDLER(3)
	INTR_HANDLER(4)
	INTR_HANDLER(5)
	INTR_HANDLER(6)
	INTR_HANDLER(7)
	INTR_HANDLER(8)
	INTR_HANDLER(9)
	INTR_HANDLER(10)
	INTR_HANDLER(11)
	INTR_HANDLER(12)
	INTR_HANDLER(13)
	INTR_HANDLER(14)
	INTR_HANDLER(15)
	INTR_HANDLER(16)
	INTR_HANDLER(17)
	INTR_HANDLER(18)
	INTR_HANDLER(19)
	INTR_HANDLER(20)
	INTR_HANDLER(21)
	INTR_HANDLER(22)
	INTR_HANDLER(23)
	INTR_HANDLER(24)
	INTR_HANDLER(25)
	INTR_HANDLER(26)
	INTR_HANDLER(27)
	INTR_HANDLER(28)
	INTR_HANDLER(29)
	INTR_HANDLER(30)
	INTR_HANDLER(31)
	INTR_HANDLER(32)
	INTR_HANDLER(33)
	INTR_HANDLER(34)
	INTR_HANDLER(35)
	INTR_HANDLER(36)
	INTR_HANDLER(37)
	INTR_HANDLER(38)
	INTR_HANDLER(39)
	INTR_HANDLER(40)
	INTR_HANDLER(41)
	INTR_HANDLER(42)
	INTR_HANDLER(43)
	INTR_HANDLER(44)
	INTR_HANDLER(45)
	INTR_HANDLER(46)
	INTR_HANDLER(47)
	INTR_HANDLER(48)
	INTR_HANDLER(49)
	INTR_HANDLER(50)
	INTR_HANDLER(51)
	INTR_HANDLER(52)
	INTR_HANDLER(53)
	INTR_HANDLER(54)
	INTR_HANDLER(55)
	INTR_HANDLER(56)
	INTR_HANDLER(57)
	INTR_HANDLER(58)
	INTR_HANDLER(59)
	INTR_HANDLER(60)
	INTR_HANDLER(61)
	INTR_HANDLER(62)
	INTR_HANDLER(63)
	INTR_HANDLER(64)
	INTR_HANDLER(65)
	INTR_HANDLER(66)
	INTR_HANDLER(67)
	INTR_HANDLER(68)
	INTR_HANDLER(69)
	INTR_HANDLER(70)
	INTR_HANDLER(71)
	INTR_HANDLER(72)
	INTR_HANDLER(73)
	INTR_HANDLER(74)
	INTR_HANDLER(75)
	INTR_HANDLER(76)
	INTR_HANDLER(77)
	INTR_HANDLER(78)
	INTR_HANDLER(79)
	INTR_HANDLER(80)
	INTR_HANDLER(81)
	INTR_HANDLER(82)
	INTR_HANDLER(83)
	INTR_HANDLER(84)
	INTR_HANDLER(85)
	INTR_HANDLER(86)
	INTR_HANDLER(87)
	INTR_HANDLER(88)
	INTR_HANDLER(89)
	INTR_HANDLER(90)
	INTR_HANDLER(91)
	INTR_HANDLER(92)
	INTR_HANDLER(93)
	INTR_HANDLER(94)
	INTR_HANDLER(95)
	INTR_HANDLER(96)
	INTR_HANDLER(97)
	INTR_HANDLER(98)
	INTR_HANDLER(99)
	INTR_HANDLER(100)
	INTR_HANDLER(101)
	INTR_HANDLER(102)
	INTR_HANDLER(103)
	INTR_HANDLER(104)
	INTR_HANDLER(105)
	INTR_HANDLER(106)
	INTR_HANDLER(107)
	INTR_HANDLER(108)
	INTR_HANDLER(109)
	INTR_HANDLER(110)
	INTR_HANDLER(111)
	INTR_HANDLER(112)
	INTR_HANDLER(113)
	INTR_HANDLER(114)
	INTR_HANDLER(115)
	INTR_HANDLER(116)
	INTR_HANDLER(117)
	INTR_HANDLER(118)
	INTR_HANDLER(119)
	INTR_HANDLER(120)
	INTR_HANDLER(121)
	INTR_HANDLER(122)
	INTR_HANDLER(123)
	INTR_HANDLER(124)
	INTR_HANDLER(125)
	INTR_HANDLER(126)
	INTR_HANDLER(127)
	INTR_HANDLER(128)
	INTR_HANDLER(129)
	INTR_HANDLER(130)
	INTR_HANDLER(131)
	INTR_HANDLER(132)
	INTR_HANDLER(133)
	INTR_HANDLER(134)
	INTR_HANDLER(135)
	INTR_HANDLER(136)
	INTR_HANDLER(137)
	INTR_HANDLER(138)
	INTR_HANDLER(139)
	INTR_HANDLER(140)
	INTR_HANDLER(141)
	INTR_HANDLER(142)
	INTR_HANDLER(143)
	INTR_HANDLER(144)
	INTR_HANDLER(145)
	INTR_HANDLER(146)
	INTR_HANDLER(147)
	INTR_HANDLER(148)
	INTR_HANDLER(149)
	INTR_HANDLER(150)
	INTR_HANDLER(151)
	INTR_HANDLER(152)
	INTR_HANDLER(153)
	INTR_HANDLER(154)
	INTR_HANDLER(155)
	INTR_HANDLER(156)
	INTR_HANDLER(157)
	INTR_HANDLER(158)
	INTR_HANDLER(159)
	INTR_HANDLER(160)
	INTR_HANDLER(161)
	INTR_HANDLER(162)
	INTR_HANDLER(163)
	INTR_HANDLER(164)
	INTR_HANDLER(165)
	INTR_HANDLER(166)
	INTR_HANDLER(167)
	INTR_HANDLER(168)
	INTR_HANDLER(169)
	INTR_HANDLER(170)
	INTR_HANDLER(171)
	INTR_HANDLER(172)
	INTR_HANDLER(173)
	INTR_HANDLER(174)
	INTR_HANDLER(175)
	INTR_HANDLER(176)
	INTR_HANDLER(177)
	INTR_HANDLER(178)
	INTR_HANDLER(179)
	INTR_HANDLER(180)
	INTR_HANDLER(181)
	INTR_HANDLER(182)
	INTR_HANDLER(183)
	INTR_HANDLER(184)
	INTR_HANDLER(185)
	INTR_HANDLER(186)
	INTR_HANDLER(187)
	INTR_HANDLER(188)
	INTR_HANDLER(189)
	INTR_HANDLER(190)
	INTR_HANDLER(191)
	MCOUNT_LABEL(eintr)

	.data

#if CPUMASK_ELEMENTS != 4
#error "assembly incompatible with cpumask_t"
#endif
	/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.quad 0
	.quad 0
	.quad 0
	.quad 0
started_cpus:
	.quad 0
	.quad 0
	.quad 0
	.quad 0

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.quad 0

	.text