sys/platform/pc32/isa/ipl.s
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 * $DragonFly: src/sys/platform/pc32/isa/ipl.s,v 1.28 2007/01/22 19:37:04 corecode Exp $
 */
#include "use_npx.h"

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"
/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */
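/*
 * Added note (not from the original source): fpending, ipending and
 * spending are per-cpu 32-bit masks, one bit per interrupt source.
 * Bit n of fpending/ipending corresponds to hardware IRQ n, while bit n
 * of spending corresponds to software interrupt FIRST_SOFTINT + n.  The
 * dispatch loops below test these against the temporary cpl in %eax:
 *
 *	unmasked = pending & ~cpl;	// cpl = IRQs deferred due to BGL
 */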
	.data
	ALIGN_DATA

	.globl	fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT
	/*
	 * GENERAL NOTES
	 *
	 *  - Fast interrupts are always called with a critical section
	 *    held.
	 *
	 *  - We release our critical section when scheduling interrupt
	 *    or software-interrupt threads so that they can preempt
	 *    (unless we are called manually from a critical section, in
	 *    which case there will still be a critical section and
	 *    they won't preempt anyway).
	 *
	 *  - TD_NEST_COUNT prevents splz from nesting too deeply within
	 *    itself.  It is *not* actually an interrupt nesting count.
	 *    PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *  - We have to be careful in regards to local interrupts
	 *    occurring simultaneously with our doreti and splz
	 *    processing.
	 */
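	/*
	 * Added note (an assumption drawn from the code below, not part
	 * of the original source): the interlock against local interrupts
	 * is implemented by testing reqflags with interrupts hard-disabled,
	 * roughly:
	 *
	 *	cli();				// no new local interrupts
	 *	if (gd->gd_reqflags == 0)	// nothing pending, safe to
	 *		return_or_iret();	//   leave with ints off
	 *	td->td_pri += TDPRI_CRIT;	// otherwise claim a crit sec
	 *	sti();				//   and process with ints on
	 */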
	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (fast, normal, or
	 * soft) and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints
	 * nor can we play with mp_lock.
	 *
	 * NOTE: Since SPLs no longer exist, all callers of this function
	 * push $0 for the CPL.  HOWEVER, we *STILL* use the cpl mask within
	 * this function to mark fast interrupts which could not be dispatched
	 * due to the unavailability of the BGL.
	 */
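	/*
	 * Added sketch (not in the original): doreti's dispatch order,
	 * expressed as hedged C pseudocode.  %eax plays the role of
	 * 'cpl' below; the names are assumptions drawn from the code:
	 *
	 *	for (;;) {
	 *		if (reqflags & RQF_IPIQ)	// SMP only
	 *			{ process ipiq; continue; }
	 *		if (fpending & ~cpl)	{ run fast int; continue; }
	 *		if (ipending & ~cpl)	{ schedule ithread; continue; }
	 *		if (spending)		{ schedule softint; continue; }
	 *		if ((reqflags & RQF_AST_MASK) && returning_to_user)
	 *			{ handle AST; continue; }
	 *		break;			// fall through to iret
	 *	}
	 */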
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)			/* init "from" bintr -> doreti */
	popl	%eax				/* cpl to restore XXX */
	movl	$0,%eax				/* irq mask unavailable due to BGL */
	movl	PCPU(curthread),%ebx
	cli					/* interlock with TDPRI_CRIT */
	cmpl	$0,PCPU(reqflags)		/* short cut if nothing to do */
	je	5f
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)	/* can't unpend if in critical sec */
	jge	5f
	addl	$TDPRI_CRIT,TD_PRI(%ebx)	/* force all ints to pending */
doreti_next:
	sti					/* allow new interrupts */
	movl	%eax,%ecx			/* irq mask unavailable due to BGL */
	notl	%ecx
	cli					/* disallow YYY remove */
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
#endif
	testl	PCPU(fpending),%ecx		/* check for an unmasked fast int */
	jnz	doreti_fast

	testl	PCPU(ipending),%ecx		/* check for an unmasked slow int */
	jnz	doreti_intr

	movl	PCPU(spending),%ecx		/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags)	/* any pending ASTs? */
	jz	2f
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	1f
	cmpl	$1,in_vm86call			/* YYY make per 'cpu'? */
	jnz	doreti_ast
1:
	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	subl	$TDPRI_CRIT,TD_PRI(%ebx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * Restore the segment registers.  Since segment register values
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
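	/*
	 * Added illustration (an assumption, not original code): the trap
	 * handler can compare the faulting %eip against the labels exported
	 * below and restart execution at the matching fixup, roughly:
	 *
	 *	if (frame->tf_eip == (int)doreti_popl_ds)
	 *		frame->tf_eip = (int)doreti_popl_ds_fault;
	 *	// ... likewise for es, fs, gs and doreti_iret ...
	 */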
	.globl	doreti_popl_gs
	.globl	doreti_popl_fs
	.globl	doreti_popl_es
	.globl	doreti_popl_ds
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
doreti_popl_gs:
	popl	%gs
doreti_popl_fs:
	popl	%fs
doreti_popl_es:
	popl	%es
doreti_popl_ds:
	popl	%ds
	popal
	addl	$3*4,%esp	/* xflags, trap, err */
doreti_iret:
	iret
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$3*4,%esp	/* xflags, trap, err */
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	pushl	%fs
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	pushl	%gs
	.globl	doreti_popl_gs_fault
doreti_popl_gs_fault:
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	alltraps_with_regs_pushed
	/*
	 * FAST interrupt pending.  NOTE: stack context holds frame structure
	 * for fast interrupt procedure, do not do random pushes or pops!
	 */
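	/*
	 * Added sketch (assumption): the bsfl/btrl pair below is the
	 * assembly form of picking and claiming the lowest pending IRQ
	 * bit, roughly:
	 *
	 *	irq = ffs(fpending & ~cpl) - 1;	// lowest unmasked bit
	 *	if (test_and_clear_bit(&fpending, irq))	// hypothetical
	 *		dofastunpend(irq);	//   helper, standing in
	 *	// else another cpu/path already took it: rescan
	 */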
	ALIGN_TEXT
doreti_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	doreti_next
	pushl	%eax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
#if 0
#ifdef SMP
	pushl	%ecx			/* save ecx */
	call	try_mplock
	popl	%ecx
	testl	%eax,%eax
	jz	1f
	/* MP lock successful */
#endif
#endif
	incl	PCPU(intr_nesting_level)
	call	dofastunpend		/* unpend fast intr %ecx */
	decl	PCPU(intr_nesting_level)
#if 0
#ifdef SMP
	call	rel_mplock
#endif
#endif
	popl	%eax
	jmp	doreti_next
1:
	btsl	%ecx, PCPU(fpending)	/* oops, couldn't get the MP lock */
	popl	%eax			/* add to temp. cpl mask to ignore */
	orl	PCPU(fpending),%eax
	jmp	doreti_next
	/*
	 *  INTR interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow an interrupt
	 *  to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 *  prevent the switch code from recursing via splz too deeply.
	 */
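	/*
	 * Added sketch (assumption) of the bracketing done below, in C;
	 * the field names are drawn from the assembly offsets:
	 *
	 *	++td->td_nest_count;		// block recursive splz
	 *	td->td_pri -= TDPRI_CRIT;	// let the ithread preempt
	 *	sched_ithd(irq);		// may preempt immediately
	 *	td->td_pri += TDPRI_CRIT;
	 *	--td->td_nest_count;
	 */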
	ALIGN_TEXT
doreti_intr:
	andl	PCPU(ipending),%ecx	/* only check normal ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(ipending)	/* is it really still pending? */
	jnc	doreti_next
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	subl	$TDPRI_CRIT,TD_PRI(%ebx) /* so we can preempt */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next
	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow an interrupt
	 *  to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 *  prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushl	%eax
	pushl	%ecx
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	subl	$TDPRI_CRIT,TD_PRI(%ebx) /* so we can preempt */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)
	addl	$4,%esp
	popl	%eax
	jmp	doreti_next
	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically, the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back-out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
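	/*
	 * Added sketch (assumption): the AST path re-enters the normal
	 * trap handler with a synthetic trap number, roughly:
	 *
	 *	gd->gd_reqflags &= ~(RQF_AST_SIGNAL | RQF_AST_UPCALL);
	 *	frame->tf_trapno = T_ASTFLT;
	 *	td->td_pri -= TDPRI_CRIT;	// trap() may block
	 *	trap(frame);
	 *	td->td_pri += TDPRI_CRIT;
	 */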
doreti_ast:
	andl	$~(RQF_AST_SIGNAL|RQF_AST_UPCALL),PCPU(reqflags)
	sti
	movl	%eax,%esi		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	pushl	%esp			/* pass frame by reference */
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	call	trap
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	addl	$4,%esp
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next
#ifdef SMP
	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%esi		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	subl	$8,%esp			/* add dummy vec and ppl */
	pushl	%esp			/* pass frame by reference */
	call	lwkt_process_ipiq_frame
	addl	$12,%esp
	decl	PCPU(intr_nesting_level)
	movl	%esi,%eax		/* restore cpl for loop */
	jmp	doreti_next
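	/*
	 * Added note (assumption): the subl $8 above extends the trap
	 * frame with two dummy words (vec and ppl) so that it resembles
	 * the intrframe layout lwkt_process_ipiq_frame() expects; the
	 * addl $12 then pops the frame pointer plus both dummy words
	 * (4 + 8 = 12 bytes).
	 */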
#endif
	/*
	 * SPLZ() is a C-callable procedure that dispatches any unmasked
	 *	  pending interrupts regardless of critical section nesting.
	 *	  ASTs are not dispatched.
	 *
	 *	  Use %eax to track those IRQs that could not be processed
	 *	  due to BGL requirements.
	 */
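	/*
	 * Added usage note (an assumption, not taken from this file):
	 * splz() is typically invoked from C when a critical section is
	 * exited while deferred work is pending, along the lines of:
	 *
	 *	td->td_pri -= TDPRI_CRIT;
	 *	if (td->td_pri < TDPRI_CRIT && gd->gd_reqflags)
	 *		splz();		// run what we deferred
	 */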
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfl
	pushl	%ebx
	movl	PCPU(curthread),%ebx
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	movl	$0,%eax

splz_next:
	cli
	movl	%eax,%ecx		/* ecx = ~CPL */
	notl	%ecx
#ifdef SMP
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
#endif
	testl	PCPU(fpending),%ecx	/* check for an unmasked fast int */
	jnz	splz_fast

	testl	PCPU(ipending),%ecx
	jnz	splz_intr

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	subl	$TDPRI_CRIT,TD_PRI(%ebx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popl	%ebx
	popfl
	ret
	/*
	 * FAST interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andl	PCPU(fpending),%ecx	/* only check fast ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(fpending)	/* is it really still pending? */
	jnc	splz_next
	pushl	%eax
#if 0
#ifdef SMP
	pushl	%ecx
	call	try_mplock
	popl	%ecx
	testl	%eax,%eax
	jz	1f
#endif
#endif
	incl	PCPU(intr_nesting_level)
	call	dofastunpend		/* unpend fast intr %ecx */
	decl	PCPU(intr_nesting_level)
#if 0
#ifdef SMP
	call	rel_mplock
#endif
#endif
	popl	%eax
	jmp	splz_next
1:
	btsl	%ecx, PCPU(fpending)	/* oops, couldn't get the MP lock */
	popl	%eax
	orl	PCPU(fpending),%eax
	jmp	splz_next
	/*
	 *  INTR interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow the interrupt
	 *  to preempt us.
	 */
	ALIGN_TEXT
splz_intr:
	andl	PCPU(ipending),%ecx	/* only check normal ints */
	bsfl	%ecx, %ecx		/* locate the next dispatchable int */
	btrl	%ecx, PCPU(ipending)	/* is it really still pending? */
	jnc	splz_next

	pushl	%eax
	pushl	%ecx
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next
	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow the interrupt
	 *  to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */

	pushl	%eax
	pushl	%ecx
	subl	$TDPRI_CRIT,TD_PRI(%ebx)
	incl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	call	sched_ithd		/* YYY must pull in imasks */
	addl	$TDPRI_CRIT,TD_PRI(%ebx)
	decl	TD_NEST_COUNT(%ebx)	/* prevent doreti/splz nesting */
	addl	$4,%esp
	popl	%eax
	jmp	splz_next
#ifdef SMP
splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushl	%eax
	call	lwkt_process_ipiq
	popl	%eax
	jmp	splz_next
#endif
	/*
	 * dofastunpend(%ecx:intr)
	 *
	 * A FAST interrupt previously made pending can now be run,
	 * execute it by pushing a dummy interrupt frame and
	 * calling ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
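	/*
	 * Added sketch (assumption) of the call contract implemented
	 * below, in C:
	 *
	 *	if (ithread_fast_handler(&dummy_frame) == 0)
	 *		MachIntrABI.intren(intr);	// unmask the IRQ now
	 *	// nonzero: leave the IRQ masked, e.g. because an ithread
	 *	// was scheduled to finish the work and unmask it later.
	 */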
#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags */			\
	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\

#define POP_DUMMY							\
	addl	$19*4,%esp ;						\
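/*
 * Added note: PUSH_DUMMY builds 6 words (flags, cs, eip, err, trapno,
 * xflags) and then reserves 13 more (8 for pushal, 4 dummy segment
 * registers, 1 CPL), so POP_DUMMY's addl $19*4 (6 + 13 = 19 words)
 * exactly unwinds it.
 */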
dofastunpend:
	pushl	%ebp			/* frame for backtrace */
	movl	%esp,%ebp
	PUSH_DUMMY
	pushl	%ecx			/* last part of intrframe = intr */
	incl	fastunpend_count
	pushl	%esp			/* pass frame by reference */
	call	ithread_fast_handler	/* returns 0 to unmask */
	addl	$4,%esp			/* remove pointer, now intr on top */
	cmpl	$0,%eax
	jnz	1f
	movl	MachIntrABI + MACHINTR_INTREN, %eax
	call	*%eax			/* MachIntrABI.intren(intr) */
1:
	addl	$4,%esp
	POP_DUMMY
	popl	%ebp
	ret