amd64: Fix improper saving of a register.
[dragonfly.git] / sys / platform / pc64 / amd64 / ipl.s
blob 38d6cc72b659907229e3ed92570b19b5f2eca5de
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/ipl.s,v 1.1 2008/08/29 17:07:10 dillon Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

	/*
	 * AT/386
	 * Vector interrupt control section
	 *
	 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
	 *  spending	- Pending software interrupts
	 */
	.data
	ALIGN_DATA

	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 *	- fast interrupts are always called with a critical section
	 *	  held
	 *
	 *	- we release our critical section when scheduling interrupt
	 *	  or softinterrupt threads so that they can preempt us
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful in regards to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 */
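
	/*
	 * Illustrative sketch of the critical-section arithmetic used
	 * throughout this file (comment only, not compiled):
	 *
	 *	td->td_pri += TDPRI_CRIT;	// enter critical section
	 *	... dispatch pending work ...
	 *	td->td_pri -= TDPRI_CRIT;	// leave critical section
	 *
	 * A thread is in a critical section whenever td_pri >= TDPRI_CRIT,
	 * which is what the TD_PRI(%rbx) comparisons below test.  The C
	 * field names are assumptions inferred from the TD_PRI and
	 * TD_NEST_COUNT assym offsets.
	 */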

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (fast, normal, or
	 * soft) and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints
	 * nor can we play with mp_lock.
	 *
	 * The stack contains a trapframe at the start of doreti.
	 */
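
	/*
	 * Rough C-level sketch of the dispatch loop below (illustrative
	 * only; the real code relies on cli/sti interlocks that C cannot
	 * express):
	 *
	 *	while (work pending && not in a critical section) {
	 *		if (unmasked fast int pending)	run it;
	 *		else if (normal int pending)	schedule its ithread;
	 *		else if (soft int pending)	schedule its ithread;
	 *		else if (AST && returning to user) call trap();
	 *		else break;
	 *	}
	 *	iretq;
	 */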
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	movq	$0,%rax			/* irq mask unavailable due to BGL */
	movq	PCPU(curthread),%rbx
	cli				/* interlock with TDPRI_CRIT */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx) /* can't unpend if in critical sec */
	jge	5f
	addl	$TDPRI_CRIT,TD_PRI(%rbx) /* force all ints to pending */
doreti_next:
	sti				/* allow new interrupts */
	movl	%eax,%ecx		/* irq mask unavailable due to BGL */
	notl	%ecx
	cli				/* disallow YYY remove */
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
#endif
	testl	PCPU(fpending),%ecx	/* check for an unmasked fast int */
	jnz	doreti_fast

	testl	PCPU(ipending),%ecx	/* check for an unmasked slow int */
	jnz	doreti_intr

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f

	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	subl	$TDPRI_CRIT,TD_PRI(%rbx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore registers and iret.  iret can fault on %rip (which is
	 * really stupid).  If this occurs we re-fault and vector to
	 * doreti_iret_fault().
	 *
	 * ...
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
	POP_FRAME		/* registers and %gs (+cli) */
	/* special global also used by exception.S */
doreti_iret:
	iretq
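
	/*
	 * NOTE: doreti_iret is global so the trap code can recognize a
	 * fault taken at exactly this instruction and revector to
	 * doreti_iret_fault below; this reading follows from the comment
	 * that accompanies doreti_iret_fault.
	 */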

	/*
	 * doreti_iret_fault.  Alternative return code for the case where
	 * we get a fault in the doreti_exit code above.  trap()
	 * (sys/platform/pc64/amd64/trap.c) catches this specific case,
	 * sends the process a signal and continues in the corresponding
	 * place in the code below.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	PUSH_FRAME_NOSWAP
	testq	$PSL_I,TF_RFLAGS(%rsp)
	jz	2f
	sti
2:
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap

	/*
	 * FAST interrupt pending.  NOTE: stack context holds frame structure
	 * for fast interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	doreti_next
	pushq	%rax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
#if 0
#ifdef SMP
	pushq	%rcx			/* save ecx */
	call	try_mplock
	popq	%rcx
	testl	%eax,%eax
	jz	1f
	/* MP lock successful */
#endif
#endif
	incl	PCPU(intr_nesting_level)
	call	dofastunpend		/* unpend fast intr %ecx */
	decl	PCPU(intr_nesting_level)
#if 0
#ifdef SMP
	call	rel_mplock
#endif
#endif
	popq	%rax
	jmp	doreti_next
1:
	btsl	%ecx, PCPU(fpending)	/* oops, couldn't get the MP lock */
	popq	%rax			/* add to temp. cpl mask to ignore */
	orl	PCPU(fpending),%eax
	jmp	doreti_next
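
	/*
	 * NOTE: the 1: failure path above is only reachable when the
	 * #if 0'd BGL (try_mplock) code is enabled; it re-marks the
	 * interrupt pending and accumulates it into the %eax mask of
	 * IRQs that could not be processed this pass.
	 */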

	/*
	 * INTR interrupt pending
	 *
	 * Temporarily back-out our critical section to allow an interrupt
	 * to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 * prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_intr:
	andl	PCPU(ipending),%ecx	/* only check normal ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(ipending)	/* is it really still pending? */
	jnc	doreti_next
	pushq	%rax
	movl	%ecx,%edi		/* argument to C function */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	subl	$TDPRI_CRIT,TD_PRI(%rbx) /* so we can preempt */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	TD_NEST_COUNT(%rbx)
	popq	%rax
	jmp	doreti_next
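
	/*
	 * Roughly equivalent C for the sequence above (sketch only; the
	 * real interlocks depend on the exact instruction ordering):
	 *
	 *	++td->td_nest_count;
	 *	td->td_pri -= TDPRI_CRIT;
	 *	sched_ithd(irq);		// schedule the ithread
	 *	td->td_pri += TDPRI_CRIT;
	 *	--td->td_nest_count;
	 *
	 * Field names are assumptions based on the assym offsets used.
	 */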

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow an interrupt
	 * to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 * prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushq	%rax
	movl	%ecx,%edi		/* argument to C call */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	subl	$TDPRI_CRIT,TD_PRI(%rbx) /* so we can preempt */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	TD_NEST_COUNT(%rbx)
	popq	%rax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically; the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back-out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* pass frame by ref (%edi = C arg) */
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	call	trap
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next
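
	/*
	 * NOTE: the cpl is parked in %r12d across the call to trap()
	 * because %r12 is callee-saved in the amd64 ABI; a caller-saved
	 * register would be clobbered by trap().  This appears to be the
	 * improper register save the commit message refers to.
	 */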

#ifdef SMP
	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	subq	$16,%rsp		/* add dummy vec and ppl */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lwkt_process_ipiq_frame
	addq	$16,%rsp
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

doreti_timer:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	subq	$16,%rsp		/* add dummy vec and ppl */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lapic_timer_process_frame
	addq	$16,%rsp
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

#endif
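
	/*
	 * NOTE: the subq/addq $16 pairs above reserve two dummy 8-byte
	 * slots on top of the trapframe so the stack can be passed by
	 * reference as an intrframe (which additionally carries a vector
	 * and ppl word).  This reading is an assumption based on the
	 * "add dummy vec and ppl" inline comments.
	 */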

	/*
	 * SPLZ() a C callable procedure to dispatch any unmasked pending
	 *	  interrupts regardless of critical section nesting.  ASTs
	 *	  are not dispatched.
	 *
	 *	  Use %eax to track those IRQs that could not be processed
	 *	  due to BGL requirements.
	 */
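
	/*
	 * Illustrative C-level contract (sketch, not an actual prototype
	 * from a header): splz() may be called from inside a critical
	 * section; it dispatches everything unmasked and pending except
	 * ASTs and returns with the caller's critical-section count,
	 * %rbx and rflags restored (note the pushfq/popfq and
	 * pushq/popq %rbx pairs below).
	 */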
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfq
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer
#endif
	testl	PCPU(fpending),%ecx	/* check for an unmasked fast int */
	jnz	splz_fast

	testl	PCPU(ipending),%ecx
	jnz	splz_intr

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	subl	$TDPRI_CRIT,TD_PRI(%rbx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popq	%rbx
	popfq
	ret

	/*
	 * FAST interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	splz_next
	pushq	%rax
#if 0
#ifdef SMP
	movl	%ecx,%edi		/* argument to try_mplock */
	call	try_mplock
	testl	%eax,%eax
	jz	1f
#endif
#endif
	incl	PCPU(intr_nesting_level)
	call	dofastunpend		/* unpend fast intr %ecx */
	decl	PCPU(intr_nesting_level)
#if 0
#ifdef SMP
	call	rel_mplock
#endif
#endif
	popq	%rax
	jmp	splz_next
1:
	btsl	%ecx, PCPU(fpending)	/* oops, couldn't get the MP lock */
	popq	%rax
	orl	PCPU(fpending),%eax
	jmp	splz_next

	/*
	 * INTR interrupt pending
	 *
	 * Temporarily back-out our critical section to allow the interrupt
	 * to preempt us.
	 */
	ALIGN_TEXT
splz_intr:
	andl	PCPU(ipending),%ecx	/* only check normal ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(ipending)	/* is it really still pending? */
	jnc	splz_next

	pushq	%rax
	movl	%ecx,%edi		/* C argument */
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	popq	%rax
	jmp	splz_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back-out our critical section to allow the interrupt
	 * to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */

	pushq	%rax
	movl	%ecx,%edi		/* C argument */
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	popq	%rax
	jmp	splz_next

#ifdef SMP
splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushq	%rax
	call	lwkt_process_ipiq
	popq	%rax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	pushq	%rax
	call	lapic_timer_process
	popq	%rax
	jmp	splz_next
#endif

	/*
	 * dofastunpend(%ecx:intr)
	 *
	 * A FAST interrupt previously made pending can now be run;
	 * execute it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
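
	/*
	 * Stack picture built by PUSH_DUMMY below (sketch):
	 *
	 *	rflags				<- pushfq
	 *	cs
	 *	rip (copy of the caller's)
	 *	... zeroed trapframe body ...	<- subq $TF_RIP,%rsp
	 *
	 * i.e. just enough of a trapframe for ithread_fast_handler() to
	 * treat it as a real interrupt frame; the intr number pushed by
	 * dofastunpend completes the intrframe.
	 */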
#define PUSH_DUMMY							\
	pushfq ;			/* phys int frame / flags */	\
	movl	%cs,%eax ;						\
	pushq	%rax ;			/* phys int frame / cs */	\
	pushq	3*8(%rsp) ;		/* original caller rip */	\
	subq	$TF_RIP,%rsp ;		/* trap frame */		\
	movq	$0,TF_XFLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_TRAPNO(%rsp) ;	/* extras */			\
	movq	$0,TF_ADDR(%rsp) ;	/* extras */			\
	movq	$0,TF_FLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_ERR(%rsp) ;	/* extras */			\

#define POP_DUMMY							\
	addq	$TF_RIP+(3*8),%rsp ;					\
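
/*
 * POP_DUMMY discards the zeroed trapframe body plus the three words
 * (rflags, cs, rip) pushed on top of it by PUSH_DUMMY, hence
 * TF_RIP + 3*8 bytes.
 */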

dofastunpend:
	pushq	%rbp			/* frame for backtrace */
	movq	%rsp,%rbp
	PUSH_DUMMY
	pushq	%rcx			/* last part of intrframe = intr */
	incl	fastunpend_count
	movq	%rsp,%rdi		/* pass frame by reference C arg */
	call	ithread_fast_handler	/* returns 0 to unmask */
	popq	%rdi			/* intrframe->trapframe */
					/* + also rdi C arg to next call */
	cmpl	$0,%eax
	jnz	1f
	movq	MachIntrABI + MACHINTR_INTREN, %rax
	callq	*%rax			/* MachIntrABI.intren(intr) */
1:
	POP_DUMMY
	popq	%rbp
	ret