/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 * $DragonFly: src/sys/platform/pc32/isa/ipl.s,v 1.28 2007/01/22 19:37:04 corecode Exp $
 */

#include "use_npx.h"

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  fpending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */

	.data
	ALIGN_DATA

	/* counter: fast interrupts unpended via dofastunpend() below */
	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

/*
 * GENERAL NOTES
 *
 *	- Fast interrupts are always called with a critical section
 *	  held.
 *
 *	- We release our critical section when scheduling interrupt
 *	  or software interrupt threads so that they can preempt
 *	  (unless we are called manually from a critical section, in
 *	  which case there will still be a critical section and
 *	  they won't preempt anyway).
 *
 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
 *	  itself.  It is *not* actually an interrupt nesting count.
 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
 *
 *	- We have to be careful in regards to local interrupts
 *	  occurring simultaneously with our doreti and splz
 *	  processing.
 */
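
/*
 * Illustrative sketch only (hypothetical C; field names mirror the
 * assym.s offsets used below) of the two counters distinguished above:
 *
 *	struct thread {
 *		int	td_pri;		// TD_PRI: in a critical section
 *					// while >= TDPRI_CRIT
 *		int	td_nest_count;	// TD_NEST_COUNT: brakes doreti/splz
 *					// recursion, nothing more
 *	};
 *	// PCPU(intr_nesting_level) is the true interrupt nesting count.
 */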

/*
 * DORETI
 *
 * Handle return from interrupts, traps and syscalls.  This function
 * checks the cpl for unmasked pending interrupts (fast, normal, or
 * soft) and schedules them if appropriate, then irets.
 *
 * If we are in a critical section we cannot run any pending ints
 * nor can we play with mp_lock.
 *
 * NOTE: Since SPLs no longer exist, all callers of this function
 * push $0 for the CPL.  HOWEVER, we *STILL* use the cpl mask within
 * this function to mark fast interrupts which could not be dispatched
 * due to the unavailability of the BGL.
 */
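
/*
 * For orientation, the dispatch loop below behaves roughly like this
 * C sketch (assumed helper names; the cli/sti interlocks are omitted):
 *
 *	for (;;) {
 *	#ifdef SMP
 *		if (reqflags & RQF_IPIQ)  { process_ipiqs(); continue; }
 *		if (reqflags & RQF_TIMER) { process_timer(); continue; }
 *	#endif
 *		if (fpending & ~cpl)	  { unpend_fast();   continue; }
 *		if (spending)		  { schedule_soft(); continue; }
 *		if ((reqflags & RQF_AST_MASK) && returning_to_user) {
 *			trap(T_ASTFLT);
 *			continue;
 *		}
 *		break;			// restore the frame and iret
 *	}
 */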

	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	popl	%eax			/* cpl to restore XXX */
	movl	$0,%eax			/* irq mask unavailable due to BGL */
	movl	PCPU(curthread),%ebx
	cli				/* interlock with TDPRI_CRIT */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) /* can't unpend if in critical sec */
	jge	5f
	addl	$TDPRI_CRIT,TD_PRI(%ebx) /* force all ints to pending */
doreti_next:
	sti				/* allow new interrupts */
	movl	%eax,%ecx		/* irq mask unavailable due to BGL */
	notl	%ecx
	cli				/* disallow YYY remove */
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
#endif
	testl	PCPU(fpending),%ecx	/* check for an unmasked fast int */
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	1f
	cmpl	$1,in_vm86call		/* YYY make per 'cpu'? */
	jnz	doreti_ast
1:
	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	subl	$TDPRI_CRIT,TD_PRI(%ebx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore the segment registers.  Since segment register values
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
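
	/*
	 * Trap-side recovery, approximately (a C sketch only; the actual
	 * test lives in the kernel-mode T_PROTFLT handling of trap()):
	 *
	 *	if (frame.tf_eip == (int)doreti_popl_ds) {
	 *		frame.tf_eip = (int)doreti_popl_ds_fault;
	 *		return;		// resume at the fixup code below
	 *	}
	 */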
	.globl	doreti_popl_gs
	.globl	doreti_popl_fs
	.globl	doreti_popl_es
	.globl	doreti_popl_ds
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
doreti_popl_gs:
	popl	%gs
doreti_popl_fs:
	popl	%fs
doreti_popl_es:
	popl	%es
doreti_popl_ds:
	popl	%ds
	popal
	addl	$3*4,%esp	/* xflags, trap, err */
doreti_iret:
	iret

	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$3*4,%esp	/* xflags, trap, err */
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	pushl	%fs
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	pushl	%gs
	.globl	doreti_popl_gs_fault
doreti_popl_gs_fault:
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	alltraps_with_regs_pushed

	/*
	 * FAST interrupt pending.  NOTE: stack context holds frame structure
	 * for fast interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	doreti_next
	pushl	%eax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend fast intr %ecx */
	popl	%eax
	jmp	doreti_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back out our critical section to allow an interrupt
	 * to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 * prevent the switch code from recursing via splz too deeply.
	 */
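	/*
	 * Roughly, in C (a sketch with assumed helper names):
	 *
	 *	irq = bsf(spending);			// lowest pending softint
	 *	if (atomic_btr(&spending, irq)) {	// still pending?
	 *		++td->td_nest_count;		// brake splz recursion
	 *		td->td_pri -= TDPRI_CRIT;	// allow preemption
	 *		sched_ithd(irq + FIRST_SOFTINT);
	 *		td->td_pri += TDPRI_CRIT;
	 *		--td->td_nest_count;
	 *	}
	 */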
	ALIGN_TEXT
doreti_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	subl	$TDPRI_CRIT,TD_PRI(%ebx) /* so we can preempt */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically, the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
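	/*
	 * In C terms the AST path below is approximately (sketch only):
	 *
	 *	reqflags &= ~(RQF_AST_SIGNAL | RQF_AST_UPCALL);
	 *	frame->tf_trapno = T_ASTFLT;
	 *	td->td_pri -= TDPRI_CRIT;	// trap() can be long-winded
	 *	trap(frame);
	 *	td->td_pri += TDPRI_CRIT;
	 */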
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%esi		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	pushl	%esp			/* pass frame by reference */
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	trap
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	addl	$4,%esp
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

#ifdef SMP
	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lwkt_process_ipiq_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * LAPIC timer pending.  We clear RQF_TIMER automatically.
	 */
doreti_timer:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lapic_timer_process_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next

#endif

/*
 * SPLZ() is a C-callable procedure to dispatch any unmasked pending
 *	  interrupts regardless of critical section nesting.  ASTs
 *	  are not dispatched.
 *
 *	  Use %eax to track those IRQs that could not be processed
 *	  due to BGL requirements.
 */
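
/*
 * A typical C call site looks roughly like this (sketch with assumed
 * context; the real callers sit in the critical-section exit paths):
 *
 *	void splz(void);
 *	...
 *	if ((gd->gd_reqflags & RQF_INTPEND) && td->td_pri < TDPRI_CRIT)
 *		splz();
 */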

	SUPERALIGN_TEXT

ENTRY(splz)
	pushfl
	pushl	%ebx
	movl	PCPU(curthread),%ebx
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer
#endif
	testl	PCPU(fpending),%ecx	/* check for an unmasked fast int */
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	subl	$TDPRI_CRIT,TD_PRI(%ebx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popl	%ebx
	popfl
	ret

	/*
	 * FAST interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	splz_next
	pushl	%eax
	call	dofastunpend		/* unpend fast intr %ecx */
	popl	%eax
	jmp	splz_next

	/*
	 * SOFT interrupt pending
	 *
	 * Temporarily back out our critical section to allow the interrupt
	 * to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */

	pushl	%eax
	pushl	%ecx
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* restore nest count */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next

#ifdef SMP
splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushl	%eax
	call	lwkt_process_ipiq
	popl	%eax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	pushl	%eax
	call	lapic_timer_process
	popl	%eax
	jmp	splz_next
#endif

/*
 * dofastunpend(%ecx:intr)
 *
 *	A FAST interrupt previously made pending can now be run,
 *	execute it by pushing a dummy interrupt frame and
 *	calling ithread_fast_handler to execute or schedule it.
 *
 *	ithread_fast_handler() returns 0 if it wants us to unmask
 *	further interrupts.
 */
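
/*
 * Call contract, approximately (a C sketch; PUSH_DUMMY below builds
 * the dummy frame the handler receives):
 *
 *	dofastunpend(intr):
 *		if (ithread_fast_handler(&dummyframe) == 0)
 *			MachIntrABI.intren(intr);	// ran inline; unmask
 *		// else the handler was scheduled; leave the IRQ masked
 */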

#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl %cs ;		/* phys int frame / cs */		\
	pushl 12(%esp) ;	/* original caller eip */		\
	pushl $0 ;		/* dummy error code */			\
	pushl $0 ;		/* dummy trap type */			\
	pushl $0 ;		/* dummy xflags */			\
	subl $13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */

#define POP_DUMMY							\
	addl $19*4,%esp

dofastunpend:
	pushl	%ebp			/* frame for backtrace */
	movl	%esp,%ebp
	PUSH_DUMMY
	pushl	%ecx			/* last part of intrframe = intr */
	incl	fastunpend_count
	pushl	%esp			/* pass frame by reference */
	call	ithread_fast_handler	/* returns 0 to unmask */
	addl	$4,%esp			/* remove pointer, now intr on top */
	cmpl	$0,%eax
	jnz	1f
	movl	MachIntrABI + MACHINTR_INTREN, %eax
	call	*%eax			/* MachIntrABI.intren(intr) */
1:
	addl	$4,%esp
	POP_DUMMY
	popl	%ebp
	ret