/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc. All rights reserved.
 */

/*
 * To understand the present state of interrupt handling on i86pc, we must
 * first consider the history of interrupt controllers and our way of handling
 * interrupts.
 *
 * History of Interrupt Controllers on i86pc
 * -----------------------------------------
 *
 *    Intel 8259 and 8259A
 *
 * The first interrupt controller that attained widespread use on i86pc was
 * the Intel 8259(A) Programmable Interrupt Controller that first saw use with
 * the 8086. It took up to 8 interrupt sources and combined them into one
 * output wire. Up to 8 8259s could be slaved together providing up to 64 IRQs.
 * With the switch to the 8259A, level mode interrupts became possible. For a
 * long time on i86pc the 8259A was the only way to handle interrupts and it
 * had its own set of quirks. The 8259A and its corresponding interval timer,
 * the 8254, are programmed using outb and inb instructions.
 *
 *    Intel Advanced Programmable Interrupt Controller (APIC)
 *
 * Starting around the time of the introduction of the P6 family
 * microarchitecture (i686), Intel introduced a new interrupt controller.
 * Instead of having the series of slaved 8259A devices, Intel opted to outfit
 * each processor with a Local APIC (lapic) and to outfit the system with at
 * least one, but potentially more, I/O APICs (ioapic). The lapics and ioapics
 * initially communicated over a dedicated bus, but this has since been
 * replaced. Each physical core and even hyperthread currently contains its
 * own local apic, which is not shared. There are a few exceptions for
 * hyperthreads, but that does not usually concern us.
 *
 * Instead of talking directly to the 8259 for status, sending End Of
 * Interrupt (EOI), etc., a microprocessor now communicates directly with the
 * lapic. This also allows each microprocessor to have independent controls.
 * The programming method is different from the 8259. Consumers map the lapic
 * registers into uncacheable memory to read and manipulate the state.
 *
 * The number of addressable interrupt vectors was increased to 256. However,
 * vectors 0-31 are reserved for processor exception handling, leaving the
 * remaining vectors for general use. In addition to hardware-generated
 * interrupts, the lapic provides a way of generating inter-processor
 * interrupts (IPI), which are the basis for CPU cross calls and CPU pokes.
 *
 * AMD ended up implementing the Intel APIC architecture in lieu of their work
 * with Cyrix.
 *
 *    Intel x2apic
 *
 * The x2apic is an extension to the lapic which started showing up around the
 * same time as the Sandy Bridge chipsets. It provides a new programming mode
 * as well as new features. The goal of the x2apic is to solve a few problems
 * with the previous generation of lapic, and the x2apic is backwards
 * compatible with the previous programming model. The only downside to using
 * the backwards-compatible mode is that you are not able to take advantage of
 * the new x2apic features.
 *
 * o The APIC ID is increased from an 8-bit value to a 32-bit value. This
 *   increases the maximum number of addressable physical processors beyond
 *   256. This new ID is assembled in a similar manner as the information that
 *   is obtainable by the extended cpuid topology leaves.
 *
 * o A new means of generating IPIs was introduced.
 *
 * o Instead of memory mapping the registers, the x2apic only allows for
 *   programming it through a series of wrmsrs (see the sketch below). This
 *   has important semantic side effects. Recall that the registers were
 *   previously all mapped to uncacheable memory, which meant that all
 *   operations to the local apic were serializing instructions. With the
 *   switch to using wrmsrs this has been relaxed and these operations can no
 *   longer be assumed to be serializing instructions.
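 *
 *   As an illustrative sketch only (this file does not program the apic
 *   itself; the PSM modules do): in x2apic mode the lapic registers are
 *   addressed as MSRs in the 0x800 range, so an EOI becomes a single
 *   wrmsr, e.g.
 *
 *	wrmsr(0x80B, 0);	<- x2apic EOI register; writing 0 signals EOI
 *
 *   whereas in the older xapic mode the same EOI is a 32-bit store to
 *   offset 0xB0 of the memory-mapped lapic register page.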
 *
 * Note, for the rest of this discussion we are only going to concern
 * ourselves with the apic and x2apic, which practically all of i86pc has been
 * using for quite some time now.
 *
 * Interrupt Priority Levels
 * -------------------------
 *
 * On i86pc systems there are a total of fifteen interrupt priority levels
 * (ipls) which range from 1-15. Level 0 is for normal, non-interrupt
 * processing. To manipulate these values, the family of spl functions (which
 * date back to UNIX on the PDP-11) are used. Specifically, splr() is used to
 * raise the priority level and splx() to lower it. One should not generally
 * call setspl() directly. A short sketch of the usual pattern follows.
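 *
 * A minimal sketch (mirroring what spl_xcall() at the bottom of this file
 * does):
 *
 *	int s = splr(ipltospl(XCALL_PIL));	<- raise, saving old level
 *	... critical work at the raised ipl ...
 *	splx(s);				<- restore the saved level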
 *
 * Both i86pc and the supported SPARC platforms honor the same conventions for
 * the meaning behind these IPLs. The most important IPL is the platform's
 * LOCK_LEVEL (0xa on i86pc). If a thread is above LOCK_LEVEL it _must_ not
 * sleep on any synchronization object. The only allowed synchronization
 * primitive is a mutex that has been specifically initialized to be a spin
 * lock (see mutex_init(9F) and the sketch below). Another important level is
 * DISP_LEVEL (0xb on i86pc). You must be at DISP_LEVEL if you want to control
 * the dispatcher. The XC_HI_PIL is the highest level (0xf) and is used during
 * cross-calls.
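 *
 * For example, a sketch of initializing such a spin lock (xc_lock is a
 * placeholder name; the interrupt-priority argument is what selects the
 * spinning behavior, see mutex_init(9F)):
 *
 *	kmutex_t xc_lock;
 *	mutex_init(&xc_lock, NULL, MUTEX_SPIN,
 *	    (void *)ipltospl(XC_HI_PIL));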
 *
 * Each interrupt that is registered in the system fires at a specific IPL.
 * Generally most interrupts fire below LOCK_LEVEL.
 *
 * PSM Drivers
 * -----------
 *
 * We currently have three sets of PSM (platform specific module) drivers
 * available: uppc, pcplusmp, and apix. uppc (uni-processor PC) is the
 * original driver that interacts with the 8259A and 8254. In general, it is
 * not used anymore given the prevalence of the apic.
 *
 * The system prefers to use the apix driver over the pcplusmp driver. The apix
 * driver requires HW support for an x2apic. If there is no x2apic HW, apix
 * will not be used. In general we prefer using the apix driver over the
 * pcplusmp driver because it gives us much more flexibility with respect to
 * interrupts. In the apix driver each local apic has its own independent set
 * of interrupts, whereas the pcplusmp driver only has a single global set of
 * interrupts. This is why pcplusmp only supports a finite number of interrupts
 * per IPL -- generally 16, often less. The apix driver supports using either
 * the x2apic or the local apic programming modes. The programming mode does
 * not change the number of interrupts available, just the number of
 * processors that we can address. For the apix driver, the x2apic mode is
 * enabled if the system supports interrupt re-mapping, otherwise the module
 * manages the x2apic in local mode.
 *
 * When there is no x2apic present, we default back to the pcplusmp PSM driver.
 * In general, this is not problematic unless you have more than 256
 * processors in the machine or you do not have enough interrupts available.
 *
 * Controlling Interrupt Generation on i86pc
 * -----------------------------------------
 *
 * There are two different ways to manipulate which interrupts will be
 * generated on i86pc. Each offers different degrees of control.
 *
 * The first is through the flags register (eflags and rflags on i386 and amd64
 * respectively). The IF bit determines whether or not interrupts are enabled
 * or disabled. This is manipulated in one of several ways. The most common way
 * is through the cli and sti instructions. These clear the IF flag and set it,
 * respectively, for the current processor. The other common way is through the
 * use of the intr_clear and intr_restore functions, sketched below.
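 *
 * A minimal sketch of the intr_clear()/intr_restore() pairing (the same
 * pattern do_splx() uses later in this file):
 *
 *	ulong_t flag = intr_clear();	<- cli, returning the prior rflags
 *	... code that must not be interrupted ...
 *	intr_restore(flag);		<- sti only if IF was set in 'flag'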
 *
 * Assuming interrupts are not blocked by the IF flag, the second form of
 * control is the Processor Priority Register (PPR). The PPR is used to
 * determine whether or not a pending interrupt should be delivered. If the
 * ipl of the new interrupt is higher than the current value in the PPR, then
 * the lapic will either deliver it immediately (if interrupts are not in
 * progress) or it will deliver it once the current interrupt processing has
 * issued an EOI. The highest unmasked interrupt will be the one delivered.
 *
 * The PPR register is based upon the max of the following two registers in
 * the lapic: the TPR register (also known as CR8 on amd64), which can be used
 * to mask interrupt levels, and the current vector (see the sketch below).
 * Because the pcplusmp module always sets TPR appropriately early in the
 * do_interrupt path, we can usually just think that the PPR is the TPR. The
 * pcplusmp module also issues an EOI once it has set the TPR, so higher
 * priority interrupts can come in while we're servicing a lower priority
 * interrupt.
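 *
 * Roughly, per the Intel SDM, with ISRV being the vector of the
 * highest-priority in-service interrupt, the priority class (the upper
 * nibble of the 8-bit value) is computed as:
 *
 *	PPR[7:4] = MAX(TPR[7:4], ISRV[7:4])
 *
 * and a pending vector is only delivered when its class exceeds PPR[7:4].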
 *
 * Handling Interrupts
 * -------------------
 *
 * Interrupts can be broken down into three categories based on priority and
 * source:
 *
 * o High level interrupts
 * o Low level hardware interrupts
 * o Low level software interrupts
 *
 *    High Level Interrupts
 *
 * High level interrupts encompass both hardware-sourced and software-sourced
 * interrupts. Examples of high level hardware interrupts include the serial
 * console. High level software-sourced interrupts are still delivered through
 * the local apic through IPIs. This is primarily cross calls.
 *
 * When a high level interrupt comes in, we will raise the SPL and then pin the
 * current lwp to the processor. We will use its lwp, but our own interrupt
 * stack, and process the high level interrupt in-situ. These handlers are
 * designed to be very short in nature and cannot go to sleep, only block on a
 * spin lock. If the interrupt has a lot of work to do, it must generate a
 * low-priority software interrupt that will be processed later.
 *
 *    Low level hardware interrupts
 *
 * Low level hardware interrupts start off like their high-level cousins. The
 * current CPU contains a number of kernel threads (kthread_t) that can be used
 * to process low level interrupts. These are shared between both low level
 * hardware and software interrupts. Note that while we run with our
 * kthread_t, we borrow the pinned thread's lwp_t until such a time as we hit
 * a synchronization object. If we hit one and need to sleep, then the
 * scheduler will instead create the rest of what we need.
 *
 *    Low level software interrupts
 *
 * Low level software interrupts are handled in a similar way as hardware
 * interrupts, but the notification vector is different. Each CPU has a bitmask
 * of pending software interrupts. We can notify a CPU to process software
 * interrupts through a specific trap vector as well as through several
 * checks that are performed throughout the code. These checks consider
 * processing software interrupts as we lower our spl.
 *
 * We attempt to process the highest pending software interrupt that is
 * greater than our current IPL. If none currently exists, then we move
 * on. We process a software interrupt in a similar fashion to a hardware
 * interrupt (a sketch of the selection check follows below).
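 *
 * A minimal sketch of that check, mirroring what dosoftint_prolog() does
 * later in this file (st_pending is the CPU's bitmask of pending softints):
 *
 *	pil = bsrw_insn((uint16_t)st_pending);	<- highest pending level
 *	if (pil <= oldpil || pil <= cpu->cpu_base_spl)
 *		return (0);			<- too low; run it later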
 *
 * Traditional Interrupt Flow
 * --------------------------
 *
 * The following diagram tracks the flow of the traditional uppc and pcplusmp
 * interrupt handlers. The apix driver has its own version of do_interrupt().
 * We come into the interrupt handler with all interrupts masked by the IF
 * flag. This is because we set up the handler using an interrupt-gate, which
 * is defined architecturally to have cleared the IF flag for us.
 *
 * +--------------+ +----------------+ +-----------+
 * | _interrupt() |--->| do_interrupt() |--->| *setlvl() |
 * +--------------+ +----------------+ +-----------+
 * | | |
 * | | |
 * low-level| | | softint
 * HW int | | +---------------------------------------+
 * +--------------+ | | |
 * | intr_thread_ |<-----+ | hi-level int |
 * | prolog() | | +----------+ |
 * +--------------+ +--->| hilevel_ | Not on intr stack |
 * | | intr_ |-----------------+ |
 * | | prolog() | | |
 * +------------+ +----------+ | |
 * | switch_sp_ | | On intr v |
 * | and_call() | | Stack +------------+ |
 * +------------+ | | switch_sp_ | |
 * | v | and_call() | |
 * v +-----------+ +------------+ |
 * +-----------+ | dispatch_ | | |
 * | dispatch_ | +-------------------| hilevel() |<------------+ |
 * | hardint() | | +-----------+ |
 * +-----------+ | |
 * | v |
 * | +-----+ +----------------------+ +-----+ hi-level |
 * +---->| sti |->| av_dispatch_autovect |->| cli |---------+ |
 * +-----+ +----------------------+ +-----+ | |
 * | | | |
 * v | | |
 * +----------+ | | |
 * | for each | | | |
 * | handler | | | |
 * | *intr() | | v |
 * +--------------+ +----------+ | +----------------+ |
 * | intr_thread_ | low-level | | hilevel_intr_ | |
 * | epilog() |<-------------------------------+ | epilog() | |
 * +--------------+ +----------------+ |
 * | | | |
 * | +----------------------v v---------------------+ |
 * | +------------+ |
 * | +---------------------->| *setlvlx() | |
 * | | +------------+ |
 * | | | |
 * | | v |
 * | | +--------+ +------------------+ +-------------+ |
 * | | | return |<----| softint pending? |----->| dosoftint() |<-----+
 * | | +--------+ no +------------------+ yes +-------------+
 * | | ^ | |
 * | | | softint pil too low | |
 * | | +--------------------------------------+ |
 * | | v
 * | | +-----------+ +------------+ +-----------+
 * | | | dispatch_ |<-----| switch_sp_ |<---------| *setspl() |
 * | | | softint() | | and_call() | +-----------+
 * | | +-----------+ +------------+
 * | | |
 * | | v
 * | | +-----+ +----------------------+ +-----+ +------------+
 * | | | sti |->| av_dispatch_autovect |->| cli |->| dosoftint_ |
 * | | +-----+ +----------------------+ +-----+ | epilog() |
 * | | +------------+
 * | | | |
 * | +----------------------------------------------------+ |
 * v |
 * +-----------+ |
 * | interrupt | |
 * | thread |<---------------------------------------------------+
 * | blocked |
 * +-----------+
 *       |
 *       v
 * +----------------+ +------------+ +-----------+ +-------+ +---------+
 * | set_base_spl() |->| *setlvlx() |->| splhigh() |->| sti() |->| swtch() |
 * +----------------+ +------------+ +-----------+ +-------+ +---------+
 *
 * Calls made on Interrupt Stacks and Epilogue routines
 *
 * We use the switch_sp_and_call() assembly routine to switch our sp to the
 * interrupt stacks and then call the appropriate dispatch function. In the
 * case of interrupts which may block, softints and hardints, we always ensure
 * that we are still on the interrupt thread when we call the epilog routine.
 * This is not just important, it's necessary. If the interrupt thread blocked,
 * we won't return from our switch_sp_and_call() function and instead we'll go
 * through and set ourselves up to swtch() directly.
 *
 * New Interrupt Flow
 * ------------------
 *
 * The apix module has its own interrupt path. This is done for various
 * reasons. The first is that rather than having global interrupt vectors, we
 * now have per-cpu vectors.
 *
 * The other substantial change is that the apix design does not use the TPR to
 * mask interrupts below the current level. In fact, except for one special
 * case, it does not use the TPR at all. Instead, it only uses the IF flag
 * (cli/sti) to either block all interrupts or allow any interrupts to come in.
 * The design is such that when interrupts are allowed to come in, if we are
 * currently servicing a higher priority interrupt, the new interrupt is
 * treated as pending and serviced later. Specifically, in the pcplusmp
 * module's apic_intr_enter() the code masks interrupts at or below the
 * current IPL using the TPR before sending EOI, whereas the apix module's
 * apix_intr_enter() simply sends EOI.
 *
 * The one special case where the apix code uses the TPR is when it calls
 * through the apic_reg_ops function pointer apic_write_task_reg in
 * apix_init_intr() to initially mask all levels and then finally to enable all
 * levels.
 *
 * Recall that we come into the interrupt handler with all interrupts masked
 * by the IF flag. This is because we set up the handler using an
 * interrupt-gate which is defined architecturally to have cleared the IF flag
 * for us.
 *
 * +--------------+ +---------------------+
 * | _interrupt() |--->| apix_do_interrupt() |
 * +--------------+ +---------------------+
 *
 * hard int? +----+--------+ softint?
 * | | (but no low-level looping)
 * +-----------+ |
 * | *setlvl() | |
 * +---------+ +-----------+ +----------------------------------+
 * |apix_add_| check IPL | |
 * |pending_ |<-------------+------+----------------------+ |
 * |hardint()| low-level int| hi-level int| |
 * +---------+ v v |
 * | check IPL +-----------------+ +---------------+ |
 * +--+-----+ | apix_intr_ | | apix_hilevel_ | |
 * | | | thread_prolog() | | intr_prolog() | |
 * | return +-----------------+ +---------------+ |
 * | | | On intr |
 * | +------------+ | stack? +------------+ |
 * | | switch_sp_ | +---------| switch_sp_ | |
 * | | and_call() | | | and_call() | |
 * | +------------+ | +------------+ |
 * | | | | |
 * | +----------------+ +----------------+ |
 * | | apix_dispatch_ | | apix_dispatch_ | |
 * | | lowlevel() | | hilevel() | |
 * | +----------------+ +----------------+ |
 * | | | |
 * | v v |
 * | +-------------------------+ |
 * | |apix_dispatch_by_vector()|----+ |
 * | +-------------------------+ | |
 * | !XC_HI_PIL| | | | |
 * | +---+ +-------+ +---+ | |
 * | |sti| |*intr()| |cli| | |
 * | +---+ +-------+ +---+ | hi-level? |
 * | +---------------------------+----+ |
 * | v low-level? v |
 * | +----------------+ +----------------+ |
 * | | apix_intr_ | | apix_hilevel_ | |
 * | | thread_epilog()| | intr_epilog() | |
 * | +----------------+ +----------------+ |
 * | | | |
 * | v-----------------+--------------------------------+ |
 * | +------------+ |
 * | | *setlvlx() | +----------------------------------------------------+
 * | +------------+ |
 * | | | +--------------------------------+ low
 * v v v------+ v | level
 * +------------------+ +------------------+ +-----------+ | pending?
 * | apix_do_pending_ |----->| apix_do_pending_ |----->| apix_do_ |--+
 * | hilevel() | | hardint() | | softint() | |
 * +------------------+ +------------------+ +-----------+ return
 * | | |
 * | while pending | while pending | while pending
 * | hi-level | low-level | softint
 * | | |
 * +---------------+ +-----------------+ +-----------------+
 * | apix_hilevel_ | | apix_intr_ | | apix_do_ |
 * | intr_prolog() | | thread_prolog() | | softint_prolog()|
 * +---------------+ +-----------------+ +-----------------+
 * | On intr | |
 * | stack? +------------+ +------------+ +------------+
 * +--------| switch_sp_ | | switch_sp_ | | switch_sp_ |
 * | | and_call() | | and_call() | | and_call() |
 * | +------------+ +------------+ +------------+
 * | | | |
 * +------------------+ +------------------+ +------------------------+
 * | apix_dispatch_ | | apix_dispatch_ | | apix_dispatch_softint()|
 * | pending_hilevel()| | pending_hardint()| +------------------------+
 * +------------------+ +------------------+ | | | |
 * | | | | | | | |
 * | +----------------+ | +----------------+ | | | |
 * | | apix_hilevel_ | | | apix_intr_ | | | | |
 * | | intr_epilog() | | | thread_epilog()| | | | |
 * | +----------------+ | +----------------+ | | | |
 * | | | | | | | |
 * | +------------+ | +----------+ +------+ | | |
 * | | *setlvlx() | | |*setlvlx()| | | | |
 * | +------------+ | +----------+ | +----------+ | +---------+
 * | | +---+ |av_ | +---+ |apix_do_ |
 * +---------------------------------+ |sti| |dispatch_ | |cli| |softint_ |
 * | apix_dispatch_pending_autovect()| +---+ |softvect()| +---+ |epilog() |
 * +---------------------------------+ +----------+ +---------+
 * |!XC_HI_PIL | | | |
 * +---+ +-------+ +---+ +----------+ +-------+
 * |sti| |*intr()| |cli| |apix_post_| |*intr()|
 * +---+ +-------+ +---+ |hardint() | +-------+
 * +----------+
 */

#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <vm/seg_kp.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/smp_impldefs.h>
#include <sys/pool_pset.h>
#include <sys/zone.h>
#include <sys/bitmap.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/x86_archext.h>
#include <sys/promif.h>
#include <vm/hat_i86.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif

#if defined(__amd64) && !defined(__xpv)
/* If this fails, then the padding numbers in machcpuvar.h are wrong. */
CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad)) <
    MMU_PAGESIZE);
CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti)) >=
    MMU_PAGESIZE);
CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti_dbg)) <
    2 * MMU_PAGESIZE);
CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad2)) <
    2 * MMU_PAGESIZE);
CTASSERT(((sizeof (struct kpti_frame)) & 0xF) == 0);
CTASSERT(((offsetof(cpu_t, cpu_m) +
    offsetof(struct machcpu, mcpu_kpti_dbg)) & 0xF) == 0);
CTASSERT((offsetof(struct kpti_frame, kf_tr_rsp) & 0xF) == 0);
#endif

#if defined(__xpv) && defined(DEBUG)

/*
 * This panic message is intended as an aid to interrupt debugging.
 *
 * The associated assertion tests the condition of enabling
 * events when events are already enabled.  The implication
 * being that whatever code the programmer thought was
 * protected by having events disabled until the second
 * enable happened really wasn't protected at all ..
 */

int stistipanic = 1;	/* controls the debug panic check */
const char *stistimsg = "stisti";
ulong_t laststi[NCPU];

/*
 * This variable tracks the last place events were disabled on each cpu.
 * It assists in debugging when asserts that interrupts are enabled trip.
 */
ulong_t lastcli[NCPU];

#endif

void do_interrupt(struct regs *rp, trap_trace_rec_t *ttp);

void (*do_interrupt_common)(struct regs *, trap_trace_rec_t *) = do_interrupt;
uintptr_t (*get_intr_handler)(int, short) = NULL;

/*
 * Set cpu's base SPL level to the highest active interrupt level.
 */
void
set_base_spl(void)
{
	struct cpu *cpu = CPU;
	uint16_t active = (uint16_t)cpu->cpu_intr_actv;

	cpu->cpu_base_spl = active == 0 ? 0 : bsrw_insn(active);
}

/*
 * Do all the work necessary to set up the cpu and thread structures
 * to dispatch a high-level interrupt.
 *
 * Returns 0 if we're -not- already on the high-level interrupt stack,
 * (and *must* switch to it), non-zero if we are already on that stack.
 *
 * Called with interrupts masked.
 * The 'pil' is already set to the appropriate level for rp->r_trapno.
 */
static int
hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil, struct regs *rp)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	ASSERT(pil > LOCK_LEVEL);

	if (pil == CBE_HIGH_PIL) {
		cpu->cpu_profile_pil = oldpil;
		if (USERMODE(rp->r_cs)) {
			cpu->cpu_profile_pc = 0;
			cpu->cpu_profile_upc = rp->r_pc;
			cpu->cpu_cpcprofile_pc = 0;
			cpu->cpu_cpcprofile_upc = rp->r_pc;
		} else {
			cpu->cpu_profile_pc = rp->r_pc;
			cpu->cpu_profile_upc = 0;
			cpu->cpu_cpcprofile_pc = rp->r_pc;
			cpu->cpu_cpcprofile_upc = 0;
		}
	}

	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * We have interrupted another high-level interrupt.
		 * Load starting timestamp, compute interval, update
		 * cumulative counter.
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		ASSERT(nestpil < pil);
		intrtime = now -
		    mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
		mcpu->intrstat[nestpil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		/*
		 * Another high-level interrupt is active below this one, so
		 * there is no need to check for an interrupt thread.  That
		 * will be done by the lowest priority high-level interrupt
		 * active.
		 */
	} else {
		kthread_t *t = cpu->cpu_thread;

		/*
		 * See if we are interrupting a low-level interrupt thread.
		 * If so, account for its time slice only if its time stamp
		 * is non-zero.
		 */
		if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
			intrtime = now - t->t_intr_start;
			mcpu->intrstat[t->t_pil][0] += intrtime;
			cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
			t->t_intr_start = 0;
		}
	}

	/*
	 * Store starting timestamp in CPU structure for this PIL.
	 */
	mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;

	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
		(*refcntp)++;
	}

	mask = cpu->cpu_intr_actv;

	cpu->cpu_intr_actv |= (1 << pil);

	return (mask & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

/*
 * Does most of the work of returning from a high level interrupt.
 *
 * Returns 0 if there are no more high level interrupts (in which
 * case we must switch back to the interrupted thread stack) or
 * non-zero if there are more (in which case we should stay on it).
 *
 * Called with interrupts masked.
 */
static int
hilevel_intr_epilog(struct cpu *cpu, uint_t pil, uint_t oldpil, uint_t vecnum)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	ASSERT(mcpu->mcpu_pri == pil);

	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;

		ASSERT(*refcntp > 0);

		if (--(*refcntp) == 0)
			cpu->cpu_intr_actv &= ~(1 << pil);
	} else {
		cpu->cpu_intr_actv &= ~(1 << pil);
	}

	ASSERT(mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] != 0);

	intrtime = now - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	/*
	 * Check for lower-pil nested high-level interrupt beneath
	 * current one.  If so, place a starting timestamp in its
	 * pil_high_start entry.
	 */
	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * find PIL of nested interrupt
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		ASSERT(nestpil < pil);
		mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
		/*
		 * (Another high-level interrupt is active below this one,
		 * so there is no need to check for an interrupt
		 * thread.  That will be done by the lowest priority
		 * high-level interrupt active.)
		 */
	} else {
		/*
		 * Check to see if there is a low-level interrupt active.
		 * If so, place a starting timestamp in the thread
		 * structure.
		 */
		kthread_t *t = cpu->cpu_thread;

		if (t->t_flag & T_INTR_THREAD)
			t->t_intr_start = now;
	}

	mcpu->mcpu_pri = oldpil;
	(void) (*setlvlx)(oldpil, vecnum);

	return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

/*
 * Set up the cpu, thread and interrupt thread structures for
 * executing an interrupt thread.  The new stack pointer of the
 * interrupt thread (which *must* be switched to) is returned.
 */
static caddr_t
intr_thread_prolog(struct cpu *cpu, caddr_t stackptr, uint_t pil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *volatile it;
	hrtime_t now = tsc_read();

	ASSERT(pil > 0);
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Get set to run an interrupt thread.
	 * There should always be an interrupt thread, since we
	 * allocate one for each level on each CPU.
	 *
	 * t_intr_start could be zero due to cpu_intr_swtch_enter.
	 */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[t->t_pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);

	t->t_sp = (uintptr_t)stackptr;	/* mark stack in curthread for resume */

	/*
	 * unlink the interrupt thread off the cpu
	 *
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	it = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it->t_link;
	it->t_intr = t;
	it->t_lwp = t->t_lwp;

	/*
	 * (threads on the interrupt thread free list could have state
	 * preset to TS_ONPROC, but it helps in debugging if
	 * they're TS_FREE.)
	 */
	it->t_state = TS_ONPROC;

	cpu->cpu_thread = it;		/* new curthread on this cpu */
	it->t_pil = (uchar_t)pil;
	it->t_pri = intr_pri + (pri_t)pil;
	it->t_intr_start = now;

	return (it->t_stk);
}

#ifdef DEBUG
int intr_thread_cnt;
#endif

/*
 * Called with interrupts disabled.
 */
static void
intr_thread_epilog(struct cpu *cpu, uint_t vec, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t;
	kthread_t *it = cpu->cpu_thread;	/* curthread */
	uint_t pil, basespl;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	pil = it->t_pil;
	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(it->t_intr_start != 0);
	intrtime = now - it->t_intr_start;
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));
	cpu->cpu_intr_actv &= ~(1 << pil);

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
		/*
		 * The interrupted thread is no longer pinned underneath
		 * the interrupt thread.  This means the interrupt must
		 * have blocked, and the interrupted thread has been
		 * unpinned, and has probably been running around the
		 * system for a while.
		 *
		 * Since there is no longer a thread under this one, put
		 * this interrupt thread back on the CPU's free list and
		 * resume the idle thread which will dispatch the next
		 * thread to run.
		 */
#ifdef DEBUG
		intr_thread_cnt++;
#endif
		cpu->cpu_stats.sys.intrblk++;
		/*
		 * Set CPU's base SPL based on active interrupts bitmask.
		 */
		set_base_spl();
		basespl = cpu->cpu_base_spl;
		mcpu->mcpu_pri = basespl;
		(*setlvlx)(basespl, vec);
		(void) splhigh();
		sti();
		it->t_state = TS_FREE;
		/*
		 * Return interrupt thread to pool.
		 */
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		swtch();
		panic("intr_thread_epilog: swtch returned");
		/*NOTREACHED*/
	}

	/*
	 * Return interrupt thread to the pool.
	 */
	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;

	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setlvlx)(pil, vec);
	t->t_intr_start = now;
	cpu->cpu_thread = t;
}

/*
 * intr_get_time() is a resource for interrupt handlers to determine how
 * much time has been spent handling the current interrupt.  Such a function
 * is needed because higher level interrupts can arrive during the
 * processing of an interrupt.  intr_get_time() only returns time spent in the
 * current interrupt handler.
 *
 * The caller must be calling from an interrupt handler running at a pil
 * below or at lock level.  Timings are not provided for high-level
 * interrupts.
 *
 * The first time intr_get_time() is called while handling an interrupt,
 * it returns the time since the interrupt handler was invoked.  Subsequent
 * calls will return the time since the prior call to intr_get_time().  Time
 * is returned as ticks.  Use scalehrtimef() to convert ticks to nsec.
 *
 * Theory Of Intrstat[][]:
 *
 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
 * uint64_ts per pil.
 *
 * intrstat[pil][0] is a cumulative count of the number of ticks spent
 * handling all interrupts at the specified pil on this CPU.  It is
 * exported via kstats to the user.
 *
 * intrstat[pil][1] is always a count of ticks less than or equal to the
 * value in [0].  The difference between [1] and [0] is the value returned
 * by a call to intr_get_time().  At the start of interrupt processing,
 * [0] and [1] will be equal (or nearly so).  As the interrupt consumes
 * time, [0] will increase, but [1] will remain the same.  A call to
 * intr_get_time() will return the difference, then update [1] to be the
 * same as [0].  Future calls will return the time since the last call.
 * Finally, when the interrupt completes, [1] is updated to the same as [0].
 *
 * Implementation:
 *
 * intr_get_time() works much like a higher level interrupt arriving.  It
 * "checkpoints" the timing information by incrementing intrstat[pil][0]
 * to include elapsed running time, and by setting t_intr_start to rdtsc.
 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
 * and updates intrstat[pil][1] to be the same as the new value of
 * intrstat[pil][0].
 *
 * In the normal handling of interrupts, after an interrupt handler returns
 * and the code in intr_thread() updates intrstat[pil][0], it then sets
 * intrstat[pil][1] to the new value of intrstat[pil][0].  When [0] == [1],
 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
 * is 0.
 *
 * Whenever interrupts arrive on a CPU which is handling a lower pil
 * interrupt, they update the lower pil's [0] to show time spent in the
 * handler that they've interrupted.  This results in a growing discrepancy
 * between [0] and [1], which is returned the next time intr_get_time() is
 * called.  Time spent in the higher-pil interrupt will not be returned in
 * the next intr_get_time() call from the original interrupt, because
 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
 */
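
/*
 * A worked example with made-up tick values: suppose a handler starts with
 * intrstat[pil][0] == intrstat[pil][1] == 1000.  After 50 ticks of handler
 * work, a call to intr_get_time() checkpoints [0] to 1050, returns
 * 1050 - 1000 = 50, and sets [1] = 1050.  If the accounting later rolls
 * another 30 ticks of handler time into [0] (now 1080) while [1] stays at
 * 1050, the next intr_get_time() call returns 30.
 */
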
uint64_t
intr_get_time(void)
{
	struct cpu *cpu;
	struct machcpu *mcpu;
	kthread_t *t;
	uint64_t time, delta, ret;
	uint_t pil;

	cli();
	cpu = CPU;
	mcpu = &cpu->cpu_m;
	t = cpu->cpu_thread;
	pil = t->t_pil;
	ASSERT((cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK) == 0);
	ASSERT(t->t_flag & T_INTR_THREAD);
	ASSERT(pil != 0);
	ASSERT(t->t_intr_start != 0);

	time = tsc_read();
	delta = time - t->t_intr_start;
	t->t_intr_start = time;

	time = mcpu->intrstat[pil][0] + delta;
	ret = time - mcpu->intrstat[pil][1];
	mcpu->intrstat[pil][0] = time;
	mcpu->intrstat[pil][1] = time;
	cpu->cpu_intracct[cpu->cpu_mstate] += delta;

	sti();
	return (ret);
}

static caddr_t
dosoftint_prolog(
	struct cpu *cpu,
	caddr_t stackptr,
	uint32_t st_pending,
	uint_t oldpil)
{
	kthread_t *t, *volatile it;
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t pil;
	hrtime_t now;

top:
	ASSERT(st_pending == mcpu->mcpu_softinfo.st_pending);

	pil = bsrw_insn((uint16_t)st_pending);
	if (pil <= oldpil || pil <= cpu->cpu_base_spl)
		return (0);

	/*
	 * XX64	Sigh.
	 *
	 * This is a transliteration of the i386 assembler code for
	 * soft interrupts.  One question is "why does this need
	 * to be atomic?"  One possible race is -other- processors
	 * posting soft interrupts to us in set_pending() i.e. the
	 * CPU might get preempted just after the address computation,
	 * but just before the atomic transaction, so another CPU would
	 * actually set the original CPU's st_pending bit.  However,
	 * it looks like it would be simpler to disable preemption there.
	 * Are there other races for which preemption control doesn't work?
	 *
	 * The i386 assembler version -also- checks to see if the bit
	 * being cleared was actually set; if it wasn't, it rechecks
	 * for more.  This seems a bit strange, as the only code that
	 * ever clears the bit is -this- code running with interrupts
	 * disabled on -this- CPU.  This code would probably be cheaper:
	 *
	 *	atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending,
	 *	    ~(1 << pil));
	 *
	 * and t->t_preempt--/++ around set_pending() even cheaper,
	 * but at this point, correctness is critical, so we slavishly
	 * emulate the i386 port.
	 */
	if (atomic_btr32((uint32_t *)
	    &mcpu->mcpu_softinfo.st_pending, pil) == 0) {
		st_pending = mcpu->mcpu_softinfo.st_pending;
		goto top;
	}

	mcpu->mcpu_pri = pil;
	(*setspl)(pil);

	now = tsc_read();

	/*
	 * Get set to run interrupt thread.
	 * There should always be an interrupt thread since we
	 * allocate one for each level on the CPU.
	 */
	it = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it->t_link;

	/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}

	/*
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	it->t_lwp = t->t_lwp;
	it->t_state = TS_ONPROC;

	/*
	 * Push interrupted thread onto list from new thread.
	 * Set the new thread as the current one.
	 * Set interrupted thread's T_SP because if it is the idle thread,
	 * resume() may use that stack between threads.
	 */

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
	t->t_sp = (uintptr_t)stackptr;

	it->t_intr = t;
	cpu->cpu_thread = it;

	/*
	 * Set bit for this pil in CPU's interrupt active bitmask.
	 */
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Initialize thread priority level from intr_pri.
	 */
	it->t_pil = (uchar_t)pil;
	it->t_pri = (pri_t)pil + intr_pri;
	it->t_intr_start = now;

	return (it->t_stk);
}

static void
dosoftint_epilog(struct cpu *cpu, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *it;
	uint_t pil, basespl;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	it = cpu->cpu_thread;
	pil = it->t_pil;

	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));
	cpu->cpu_intr_actv &= ~(1 << pil);
	intrtime = now - it->t_intr_start;
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
		/*
		 * Put thread back on the interrupt thread list.
		 * This was an interrupt thread, so set CPU's base SPL.
		 */
		set_base_spl();
		it->t_state = TS_FREE;
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		(void) splhigh();
		sti();
		swtch();
		/*NOTREACHED*/
		panic("dosoftint_epilog: swtch returned");
	}
	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;
	cpu->cpu_thread = t;
	if (t->t_flag & T_INTR_THREAD)
		t->t_intr_start = now;
	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setspl)(pil);
}

/*
 * Make the interrupted thread 'to' be runnable.
 *
 * Since t->t_sp has already been saved, t->t_pc is all
 * that needs to be set in this function.
 *
 * Returns the interrupt level of the interrupt thread.
 */
int
intr_passivate(
	kthread_t *it,		/* interrupt thread */
	kthread_t *t)		/* interrupted thread */
{
	extern void _sys_rtt();

	ASSERT(it->t_flag & T_INTR_THREAD);
	ASSERT(SA(t->t_sp) == t->t_sp);

	t->t_pc = (uintptr_t)_sys_rtt;
	return (it->t_pil);
}

/*
 * Create interrupt kstats for this CPU.
 */
void
cpu_create_intrstat(cpu_t *cp)
{
	int		i;
	kstat_t		*intr_ksp;
	kstat_named_t	*knp;
	char		name[KSTAT_STRLEN];
	zoneid_t	zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;

	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

	/*
	 * Initialize each PIL's named kstat.
	 */
	if (intr_ksp != NULL) {
		intr_ksp->ks_update = cpu_kstat_intrstat_update;
		knp = (kstat_named_t *)intr_ksp->ks_data;
		intr_ksp->ks_private = cp;
		for (i = 0; i < PIL_MAX; i++) {
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
			    i + 1);
			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
			    i + 1);
			kstat_named_init(&knp[(i * 2) + 1], name,
			    KSTAT_DATA_UINT64);
		}
		kstat_install(intr_ksp);
	}
}

/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}

/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t	*knp = ksp->ks_data;
	cpu_t		*cpup = (cpu_t *)ksp->ks_private;
	int		i;
	hrtime_t	hrt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	for (i = 0; i < PIL_MAX; i++) {
		hrt = (hrtime_t)cpup->cpu_m.intrstat[i + 1][0];
		scalehrtimef(&hrt);
		knp[i * 2].value.ui64 = (uint64_t)hrt;
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}

	return (0);
}

/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp.  This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler.  intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp.  Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * Note that we use atomic ops below (atomic_cas_64 and
	 * atomic_add_64), which we don't use in the functions above,
	 * because we're not called with interrupts blocked, but the
	 * epilog/prolog functions are.
	 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = tsc_read() - start;
		} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL);
}

/*
 * An interrupt thread is returning from swtch().  Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	do {
		ts = t->t_intr_start;
	} while (atomic_cas_64(&t->t_intr_start, ts, tsc_read()) != ts);
}

/*
 * Dispatch a hilevel interrupt (one above LOCK_LEVEL).
 */
/*ARGSUSED*/
static void
dispatch_hilevel(uint_t vector, uint_t arg2)
{
	sti();
	av_dispatch_autovect(vector);
	cli();
}

/*
 * Dispatch a soft interrupt.
 */
/*ARGSUSED*/
static void
dispatch_softint(uint_t oldpil, uint_t arg2)
{
	struct cpu *cpu = CPU;

	sti();
	av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
	cli();

	/*
	 * Must run softint_epilog() on the interrupt thread stack, since
	 * there may not be a return from it if the interrupt thread blocked.
	 */
	dosoftint_epilog(cpu, oldpil);
}

/*
 * Dispatch a normal interrupt.
 */
static void
dispatch_hardint(uint_t vector, uint_t oldipl)
{
	struct cpu *cpu = CPU;

	sti();
	av_dispatch_autovect(vector);
	cli();

	/*
	 * Must run intr_thread_epilog() on the interrupt thread stack, since
	 * there may not be a return from it if the interrupt thread blocked.
	 */
	intr_thread_epilog(cpu, vector, oldipl);
}

/*
 * Deliver any softints the current interrupt priority allows.
 * Called with interrupts disabled.
 */
void
dosoftint(struct regs *regs)
{
	struct cpu *cpu = CPU;
	int oldipl;
	caddr_t newsp;

	while (cpu->cpu_softinfo.st_pending) {
		oldipl = cpu->cpu_pri;
		newsp = dosoftint_prolog(cpu, (caddr_t)regs,
		    cpu->cpu_softinfo.st_pending, oldipl);
		/*
		 * If returned stack pointer is NULL, priority is too high
		 * to run any of the pending softints now.
		 * Break out and they will be run later.
		 */
		if (newsp == NULL)
			break;
		switch_sp_and_call(newsp, dispatch_softint, oldipl, 0);
	}
}

/*
 * Interrupt service routine, called with interrupts disabled.
 */
/*ARGSUSED*/
void
do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int newipl, oldipl = cpu->cpu_pri;
	uint_t vector;
	caddr_t newsp;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		dosoftint(rp);
		ASSERT(!interrupts_enabled());
		return;
	}

	/*
	 * Raise the interrupt priority.
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt.
	 */
	if (newipl == -1)
		return;
	cpu->cpu_pri = newipl;
	vector = rp->r_trapno;
#ifdef TRAPTRACE
	ttp->ttr_vector = vector;
#endif	/* TRAPTRACE */
	if (newipl > LOCK_LEVEL) {
		/*
		 * High priority interrupts run on this cpu's interrupt stack.
		 */
		if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, dispatch_hilevel, vector, 0);
		} else { /* already on the interrupt stack */
			dispatch_hilevel(vector, 0);
		}
		(void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
	} else {
		/*
		 * Run this interrupt in a separate thread.
		 */
		newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
		switch_sp_and_call(newsp, dispatch_hardint, vector, oldipl);
	}

#if !defined(__xpv)
	/*
	 * Deliver any pending soft interrupts.
	 */
	if (cpu->cpu_softinfo.st_pending)
		dosoftint(rp);
#endif	/* !__xpv */
}

/*
 * Common tasks always done by _sys_rtt, called with interrupts disabled.
 * Returns 1 if returning to userland, 0 if returning to system mode.
 */
int
sys_rtt_common(struct regs *rp)
{
	kthread_t *tp;
	extern void mutex_exit_critical_start();
	extern long mutex_exit_critical_size;
	extern void mutex_owner_running_critical_start();
	extern long mutex_owner_running_critical_size;

loop:

	/*
	 * Check if returning to user.
	 */
	tp = CPU->cpu_thread;
	if (USERMODE(rp->r_cs)) {
		/*
		 * Check if AST pending.
		 */
		if (tp->t_astflag) {
			/*
			 * Let trap() handle the AST.
			 */
			sti();
			rp->r_trapno = T_AST;
			trap(rp, (caddr_t)0, CPU->cpu_id);
			cli();
			goto loop;
		}

#if defined(__amd64)
		/*
		 * We are done if segment registers do not need updating.
		 */
		if (tp->t_lwp->lwp_pcb.pcb_rupdate == 0)
			return (1);

		if (update_sregs(rp, tp->t_lwp)) {
			/*
			 * 1 or more of the selectors is bad.
			 * Deliver a SIGSEGV.
			 */
			proc_t *p = ttoproc(tp);

			sti();
			mutex_enter(&p->p_lock);
			tp->t_lwp->lwp_cursig = SIGSEGV;
			mutex_exit(&p->p_lock);
			psig();
			tp->t_sig_check = 1;
			cli();
		}
		tp->t_lwp->lwp_pcb.pcb_rupdate = 0;

#endif	/* __amd64 */
		return (1);
	}

#if !defined(__xpv)
	/*
	 * Assert that we're not trying to return into the syscall return
	 * trampolines.  Things will go baaaaad if we try to do that.
	 *
	 * Note that none of these run with interrupts on, so this should
	 * never happen (even in the sysexit case the STI doesn't take effect
	 * until after sysexit finishes).
	 */
	extern void tr_sysc_ret_start();
	extern void tr_sysc_ret_end();
	ASSERT(!(rp->r_pc >= (uintptr_t)tr_sysc_ret_start &&
	    rp->r_pc <= (uintptr_t)tr_sysc_ret_end));
#endif

	/*
	 * Here if we are returning to supervisor mode.
	 * Check for a kernel preemption request.
	 */
	if (CPU->cpu_kprunrun && (rp->r_ps & PS_IE)) {

		/*
		 * Do nothing if already in kpreempt.
		 */
		if (!tp->t_preempt_lk) {
			tp->t_preempt_lk = 1;
			sti();
			kpreempt(1); /* asynchronous kpreempt call */
			cli();
			tp->t_preempt_lk = 0;
		}
	}

	/*
	 * If we interrupted the mutex_exit() critical region we must
	 * reset the PC back to the beginning to prevent missed wakeups.
	 * See the comments in mutex_exit() for details.
	 */
	if ((uintptr_t)rp->r_pc - (uintptr_t)mutex_exit_critical_start <
	    mutex_exit_critical_size) {
		rp->r_pc = (greg_t)mutex_exit_critical_start;
	}

	/*
	 * If we interrupted the mutex_owner_running() critical region we
	 * must reset the PC back to the beginning to prevent dereferencing
	 * of a freed thread pointer.  See the comments in mutex_owner_running
	 * for details.
	 */
	if ((uintptr_t)rp->r_pc -
	    (uintptr_t)mutex_owner_running_critical_start <
	    mutex_owner_running_critical_size) {
		rp->r_pc = (greg_t)mutex_owner_running_critical_start;
	}

	return (0);
}

void
send_dirint(int cpuid, int int_level)
{
	(*send_dirintf)(cpuid, int_level);
}
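
/*
 * IS_FAKE_SOFTINT is used by do_splx() and splr() below: if the saved flags
 * show that interrupts are about to be re-enabled (PS_IE was set), it checks
 * whether the PSM reports a pending interrupt level above the new priority,
 * or whether the highest pending software interrupt is above it, in which
 * case the caller simulates its delivery via fakesoftint().
 */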

#define	IS_FAKE_SOFTINT(flag, newpri)		\
	(((flag) & PS_IE) &&			\
	    (((*get_pending_spl)() > (newpri)) ||	\
	    bsrw_insn((uint16_t)cpu->cpu_softinfo.st_pending) > (newpri)))

/*
 * do_splx routine, takes new ipl to set and
 * returns the old ipl.
 * We are careful not to set the priority lower than CPU->cpu_base_pri;
 * even though it seems we're raising the priority, it could be set
 * higher at any time by an interrupt routine, so we must block interrupts
 * and look at CPU->cpu_base_pri.
 */
int
do_splx(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
	cpu = CPU; /* ints are disabled, now safe to cache cpu ptr */
	curpri = cpu->cpu_m.mcpu_pri;
	basepri = cpu->cpu_base_spl;
	if (newpri < basepri)
		newpri = basepri;
	cpu->cpu_m.mcpu_pri = newpri;
	(*setspl)(newpri);

	/*
	 * If we are going to reenable interrupts, see if the new priority
	 * level allows pending softint delivery.
	 */
	if (IS_FAKE_SOFTINT(flag, newpri))
		fakesoftint();
	ASSERT(!interrupts_enabled());
	intr_restore(flag);
	return (curpri);
}

/*
 * Common spl raise routine, takes new ipl to set and
 * returns the old ipl; will not lower the ipl.
 */
int
splr(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
	cpu = CPU; /* ints are disabled, now safe to cache cpu ptr */
	curpri = cpu->cpu_m.mcpu_pri;
	/*
	 * Only do something if new priority is larger.
	 */
	if (newpri > curpri) {
		basepri = cpu->cpu_base_spl;
		if (newpri < basepri)
			newpri = basepri;
		cpu->cpu_m.mcpu_pri = newpri;
		(*setspl)(newpri);
		/*
		 * See if new priority level allows pending softint delivery.
		 */
		if (IS_FAKE_SOFTINT(flag, newpri))
			fakesoftint();
	}
	intr_restore(flag);
	return (curpri);
}

int
getpil(void)
{
	return (CPU->cpu_m.mcpu_pri);
}

int
spl_xcall(void)
{
	return (splr(ipltospl(XCALL_PIL)));
}

int
interrupts_enabled(void)
{
	ulong_t	flag;

	flag = getflags();
	return ((flag & PS_IE) == PS_IE);
}

#ifdef DEBUG
void
assert_ints_enabled(void)
{
	ASSERT(!interrupts_unleashed || interrupts_enabled());
}
#endif	/* DEBUG */