/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/machintr.h>

#include <machine/frame.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t *handler;
    void *argument;
    char *name;
    int intr;
    int intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t i_reclist;
    struct thread *i_thread;		/* don't embed struct thread */
    struct random_softc i_random;
    long i_count;			/* interrupts dispatched */
    int i_running;
    short i_mplock_required;
    short i_flags;
    int i_fast;
    int i_slow;
    int i_state;
    int i_errorticks;
    unsigned long i_straycount;
    int i_cpuid;
    int i_intr;
};

struct intr_info_block {
    struct intr_info ary[MAXCPU][MAX_INTS];
};

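/*
 * Interrupt state is kept per-cpu: intr_block->ary[cpu][intr].  Each cpu
 * owns its own row of intr_info structures, which is why fields such as
 * i_running can be tested and set without atomic ops (see the comments
 * above sched_ithd_remote() and ithread_handler() below).
 */
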
static struct intr_info_block *intr_block;
static struct intr_info *swi_info_ary[MAX_SOFTINTS];

static int max_installed_hard_intr[MAXCPU];

MALLOC_DEFINE(M_INTRMNG, "intrmng", "interrupt management");

#define EMERGENCY_INTR_POLLING_FREQ_MAX	20000

/*
 * Assert that callers into interrupt handlers don't return with
 * dangling tokens, spinlocks, or mp locks.
 */
#ifdef INVARIANTS

#define TD_INVARIANTS_DECLARE	\
	int spincount;		\
	lwkt_tokref_t curstop

#define TD_INVARIANTS_GET(td)					\
	do {							\
		spincount = (td)->td_gd->gd_spinlocks;		\
		curstop = (td)->td_toks_stop;			\
	} while(0)

#define TD_INVARIANTS_TEST(td, name)					\
	do {								\
		KASSERT(spincount == (td)->td_gd->gd_spinlocks,		\
			("spincount mismatch after interrupt handler %s", \
			name));						\
		KASSERT(curstop == (td)->td_toks_stop,			\
			("token count mismatch after interrupt handler %s", \
			name));						\
	} while(0)

#else	/* !INVARIANTS */

#define TD_INVARIANTS_DECLARE
#define TD_INVARIANTS_GET(td)
#define TD_INVARIANTS_TEST(td, name)

#endif /* ndef INVARIANTS */

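/*
 * The TD_INVARIANTS_* macros bracket every handler invocation below (see
 * ithread_fast_handler(), ithread_handler() and ithread_emergency()):
 * DECLARE reserves space for the saved state, GET snapshots the thread's
 * spinlock count and token stop pointer before the handlers run, and TEST
 * asserts that both are unchanged afterwards.
 */
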
static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, int,
    struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(struct intr_info *info, const char *func);
static void int_moveto_destcpu(int *, int);
static void int_moveto_origcpu(int, int);
static void sched_ithd_intern(struct intr_info *info);

static struct systimer emergency_intr_timer[MAXCPU];
static struct thread *emergency_intr_thread[MAXCPU];

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

static int livelock_limit = 40000;
static int livelock_limit_hi = 120000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
    CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_limit_hi,
    CTLFLAG_RW, &livelock_limit_hi, 0,
    "Livelock interrupt rate limit (high frequency)");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
    CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
    CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error, enabled, cpuid, freq, origcpu;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
	return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable)
	freq = emergency_intr_freq;
    else
	freq = 1;

    origcpu = mycpuid;
    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	lwkt_migratecpu(cpuid);
	systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    }
    lwkt_migratecpu(origcpu);
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error, phz, cpuid, freq, origcpu;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
	return error;
    if (phz <= 0)
	return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
	phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable)
	freq = emergency_intr_freq;
    else
	freq = 1;

    origcpu = mycpuid;
    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	lwkt_migratecpu(cpuid);
	systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
    }
    lwkt_migratecpu(origcpu);
    return 0;
}

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("register_swi: bad intr %d", intr);

    if (cpuid < 0)
	cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer, 0, cpuid));
}

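/*
 * Illustrative call only (the handler and softc fields are hypothetical):
 *
 *	sc->sc_swi_id = register_swi(intr, my_swi_handler, sc, "myswi",
 *				     NULL, -1);
 *
 * Passing a negative cpuid lets the intr number select the target cpu,
 * and the returned opaque id is what unregister_swi() expects back.
 */
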
void *
register_swi_mp(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int cpuid)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("register_swi_mp: bad intr %d", intr);

    if (cpuid < 0)
	cpuid = intr % ncpus;
    return(register_int(intr, handler, arg, name, serializer,
	INTR_MPSAFE, cpuid));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_int: bad intr %d", intr);

    info = &intr_block->ary[cpuid][intr];

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * This intr has been registered as an exclusive one, so
     * it can't be shared.
     */
    if (info->i_flags & INTR_EXCL) {
	int_moveto_origcpu(orig_cpuid, cpuid);
	return NULL;
    }

    /*
     * This intr has been registered as a shared one, so it
     * can't be used for an exclusive handler.
     */
    list = &info->i_reclist;
    if ((intr_flags & INTR_EXCL) && *list != NULL) {
	int_moveto_origcpu(orig_cpuid, cpuid);
	return NULL;
    }

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.  objcache may not be operational yet, so use kmalloc().
     */
    if (emergency_intr_thread[cpuid] == NULL) {
	emergency_intr_thread[cpuid] = kmalloc(sizeof(struct thread),
	    M_DEVBUF, M_INTWAIT | M_ZERO);
	lwkt_create(ithread_emergency, NULL, NULL,
		    emergency_intr_thread[cpuid],
		    TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
		    cpuid);
	systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
		    emergency_intr_timer_callback,
		    emergency_intr_thread[cpuid],
		    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
	info->i_state = ISTATE_NORMAL;
	info->i_thread = kmalloc(sizeof(struct thread), M_DEVBUF,
	    M_INTWAIT | M_ZERO);
	lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
		    info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
		    "ithread%d %d", intr, cpuid);
	if (intr >= FIRST_SOFTINT)
	    lwkt_setpri(info->i_thread, TDPRI_SOFT_NORM);
	else
	    lwkt_setpri(info->i_thread, TDPRI_INT_MED);
	info->i_thread->td_preemptable = lwkt_preempt;
    }

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0) {
	info->i_mplock_required = 1;
	kprintf("interrupt uses mplock: %s\n", name);
    }
    if (intr_flags & INTR_CLOCK)
	++info->i_fast;
    else
	++info->i_slow;

    info->i_flags |= (intr_flags & INTR_EXCL);
    if (info->i_slow + info->i_fast == 1 && (intr_flags & INTR_HIFREQ)) {
	/*
	 * Allow a high frequency interrupt, if this intr is not
	 * shared yet.
	 */
	info->i_flags |= INTR_HIFREQ;
    } else {
	info->i_flags &= ~INTR_HIFREQ;
    }

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 &&
	info->i_random.sc_enabled == 0) {
	info->i_random.sc_enabled = 1;
	info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
	list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
	if (max_installed_hard_intr[cpuid] <= intr)
	    max_installed_hard_intr[cpuid] = intr + 1;
    }

    if (intr >= FIRST_SOFTINT)
	swi_info_ary[intr - FIRST_SOFTINT] = info;

    /*
     * Setup the machine level interrupt vector
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
	machintr_intr_setup(intr, intr_flags);

    int_moveto_origcpu(orig_cpuid, cpuid);
    return(rec);
}

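/*
 * The opaque id returned by register_int() is simply the intrec pointer;
 * unregister_int() below casts it back to locate and unlink the record.
 */
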
void
unregister_swi(void *id, int intr, int cpuid)
{
    if (cpuid < 0)
	cpuid = intr % ncpus;

    unregister_int(id, cpuid);
}

void
unregister_int(void *id, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr, orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_int: bad intr %d", intr);

    info = &intr_block->ary[cpuid][intr];

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and teardown the machine level vector if this was the last interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
	if (rec == id)
	    break;
	list = &rec->next;
    }
    if (rec) {
	intrec_t rec0;

	*list = rec->next;
	if (rec->intr_flags & INTR_CLOCK)
	    --info->i_fast;
	else
	    --info->i_slow;
	if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
	    machintr_intr_teardown(intr);

	/*
	 * Clear i_mplock_required if no handlers in the chain require the
	 * MP lock.
	 */
	for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
	    if ((rec0->intr_flags & INTR_MPSAFE) == 0)
		break;
	}
	if (rec0 == NULL)
	    info->i_mplock_required = 0;

	if (info->i_reclist == NULL) {
	    info->i_flags = 0;
	    if (intr >= FIRST_SOFTINT)
		swi_info_ary[intr - FIRST_SOFTINT] = NULL;
	} else if (info->i_fast + info->i_slow == 1 &&
	    (info->i_reclist->intr_flags & INTR_HIFREQ)) {
	    /* Unshared high frequency interrupt. */
	    info->i_flags |= INTR_HIFREQ;
	}
    }
    crit_exit();

    int_moveto_origcpu(orig_cpuid, cpuid);

    /*
     * Free the record.
     */
    if (rec != NULL) {
	kfree(rec->name, M_DEVBUF);
	kfree(rec, M_DEVBUF);
    } else {
	kprintf("warning: unregister_int: int %d handler for %s not found\n",
		intr, ((intrec_t)id)->name);
    }
}

long
get_interrupt_counter(int intr, int cpuid)
{
    struct intr_info *info;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
	panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_block->ary[cpuid][intr];
    return(info->i_count);
}

void
register_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	info = &intr_block->ary[cpuid][intr];
	info->i_random.sc_intr = intr;
	info->i_random.sc_enabled = 1;
    }
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;
    int cpuid;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_randintr: bad intr %d", intr);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	info = &intr_block->ary[cpuid][intr];
	info->i_random.sc_enabled = -1;
    }
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("next_registered_randintr: bad intr %d", intr);

    while (intr < MAX_INTS) {
	int cpuid;

	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	    info = &intr_block->ary[cpuid][intr];
	    if (info->i_random.sc_enabled > 0)
		return intr;
	}
	++intr;
    }
    return intr;
}

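/*
 * Illustrative use of next_registered_randintr() (a sketch, not taken from
 * a real caller): walk every interrupt currently feeding the entropy pool.
 *
 *	for (intr = next_registered_randintr(0); intr < MAX_INTS;
 *	     intr = next_registered_randintr(intr + 1)) {
 *		...
 *	}
 */
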
/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
static void
sched_ithd_remote(void *arg)
{
    sched_ithd_intern(arg);
}

static void
sched_ithd_intern(struct intr_info *info)
{
    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
	if (info->i_reclist == NULL) {
	    report_stray_interrupt(info, "sched_ithd");
	} else {
	    if (info->i_thread->td_gd == mycpu) {
		if (info->i_running == 0) {
		    info->i_running = 1;
		    if (info->i_state != ISTATE_LIVELOCKED)
			lwkt_schedule(info->i_thread); /* MIGHT PREEMPT */
		}
	    } else {
		lwkt_send_ipiq(info->i_thread->td_gd, sched_ithd_remote,
		    info);
	    }
	}
    } else {
	report_stray_interrupt(info, "sched_ithd");
    }
}

void
sched_ithd_soft(int intr)
{
    struct intr_info *info;

    KKASSERT(intr >= FIRST_SOFTINT && intr < MAX_INTS);

    info = swi_info_ary[intr - FIRST_SOFTINT];
    if (info != NULL) {
	sched_ithd_intern(info);
    } else {
	kprintf("unregistered softint %d got scheduled on cpu%d\n",
	    intr, mycpuid);
    }
}

void
sched_ithd_hard(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_block->ary[mycpuid][intr]);
}

#ifdef _KERNEL_VIRTUAL

void
sched_ithd_hard_virtual(int intr)
{
    KKASSERT(intr >= 0 && intr < MAX_HARDINTS);
    sched_ithd_intern(&intr_block->ary[0][intr]);
}

void *
register_int_virtual(int intr, inthand2_t *handler, void *arg,
    const char *name, struct lwkt_serialize *serializer, int intr_flags)
{
    return register_int(intr, handler, arg, name, serializer, intr_flags, 0);
}

void
unregister_int_virtual(void *id)
{
    unregister_int(id, 0);
}

#endif /* _KERNEL_VIRTUAL */

static void
report_stray_interrupt(struct intr_info *info, const char *func)
{
    ++info->i_straycount;
    if (info->i_straycount < 10) {
	if (info->i_errorticks == ticks)
	    return;
	info->i_errorticks = ticks;
	kprintf("%s: stray interrupt %d on cpu%d\n",
	    func, info->i_intr, mycpuid);
    } else if (info->i_straycount == 10) {
	kprintf("%s: %ld stray interrupts %d on cpu%d - "
		"there will be no further reports\n", func,
		info->i_straycount, info->i_intr, mycpuid);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe; the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st, int in_ipi __unused,
    struct intrframe *frame __unused)
{
    struct intr_info *info;

    info = &intr_block->ary[mycpuid][(int)(intptr_t)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
	lwkt_schedule(info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * Temporarily bump the current thread's td_nest_count to prevent deep
 * preemptions and splz/doreti stacks.
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
    ++td->td_nest_count;

    /*
     * We are already in a critical section; exit it now to
     * allow preemption.
     */
    crit_exit_quick(td);
    sched_ithd_hard(intr);
    crit_enter_quick(td);

    --td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
    int got_mplock;
    TD_INVARIANTS_DECLARE;
    intrec_t rec, nrec;
    globaldata_t gd;
    thread_t td;

    intr = frame->if_vec;
    gd = mycpu;
    td = curthread;

    /* We must be in a critical section. */
    KKASSERT(td->td_critcount);

    info = &intr_block->ary[mycpuid][intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     */
    if (info->i_fast == 0) {
	++gd->gd_cnt.v_intr;
	ithread_fast_sched(intr, td);
	return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
	return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
    got_mplock = 0;

    TD_INVARIANTS_GET(td);
    list = &info->i_reclist;

    for (rec = *list; rec; rec = nrec) {
	/* rec may be invalid after call */
	nrec = rec->next;

	if (rec->intr_flags & INTR_CLOCK) {
	    if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
		if (try_mplock() == 0) {
		    /* Couldn't get the MP lock; just schedule it. */
		    must_schedule = 1;
		    break;
		}
		got_mplock = 1;
	    }
	    if (rec->serializer) {
		must_schedule += lwkt_serialize_handler_try(
				    rec->serializer, rec->handler,
				    rec->argument, frame);
	    } else {
		rec->handler(rec->argument, frame);
	    }
	    TD_INVARIANTS_TEST(td, rec->name);
	}
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
    if (got_mplock)
	rel_mplock();

    /*
     * If we had a problem, or mixed fast and slow interrupt handlers are
     * registered, schedule the ithread to catch the missed records (it
     * will just re-run all of them).  A return value of 0 indicates that
     * all handlers have been run and the interrupt can be re-enabled, and
     * a non-zero return indicates that the interrupt thread controls
     * re-enablement.
     */
    if (must_schedule > 0)
	ithread_fast_sched(intr, td);
    else if (must_schedule == 0)
	++info->i_count;
    return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and without
 * the MP lock.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd_hard() is called.  The HW interrupt
 * remains disabled until all routines have run.  We then call
 * machintr_intr_enable() to reenable the HW interrupt and deschedule us
 * until the next interrupt.
 *
 * We are responsible for atomically checking i_running.  i_running for our
 * irq is only set in the context of our cpu, so a critical section is a
 * sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    uint32_t lseconds;
    int intr, cpuid = mycpuid;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;	/* enforced freq. timer */
    u_int ill_count;		/* interrupt livelock counter */
    int upper_limit;		/* interrupt livelock upper limit */
    TD_INVARIANTS_DECLARE;

    ill_count = 0;
    intr = (int)(intptr_t)arg;
    info = &intr_block->ary[cpuid][intr];
    list = &info->i_reclist;

    /*
     * The loop must be entered with one critical section held.  The thread
     * does not hold the mplock on startup.
     */
    gd = mycpu;
    lseconds = gd->gd_time_seconds;
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
	/*
	 * The chain is only considered MPSAFE if all its interrupt handlers
	 * are MPSAFE.  However, if intr_mpsafe has been turned off we
	 * always operate with the BGL.
	 */
	if (info->i_mplock_required != mpheld) {
	    if (info->i_mplock_required) {
		KKASSERT(mpheld == 0);
		get_mplock();
		mpheld = 1;
	    } else {
		KKASSERT(mpheld != 0);
		rel_mplock();
		mpheld = 0;
	    }
	}

	TD_INVARIANTS_GET(gd->gd_curthread);

	/*
	 * If an interrupt is pending, clear i_running and execute the
	 * handlers.  Note that certain types of interrupts can re-trigger
	 * and set i_running again.
	 *
	 * Each handler is run in a critical section.  Note that we run both
	 * FAST and SLOW designated service routines.
	 */
	if (info->i_running) {
	    ++ill_count;
	    info->i_running = 0;

	    if (*list == NULL)
		report_stray_interrupt(info, "ithread_handler");

	    for (rec = *list; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;
		if (rec->handler == NULL) {
		    kprintf("NULL HANDLER %s\n", rec->name);
		} else if (rec->serializer) {
		    lwkt_serialize_handler_call(rec->serializer, rec->handler,
						rec->argument, NULL);
		} else {
		    rec->handler(rec->argument, NULL);
		}
		TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
	    }
	}

	/*
	 * This is our interrupt hook to add rate randomness to the random
	 * number generator.
	 */
	if (info->i_random.sc_enabled > 0)
	    add_interrupt_randomness(intr);

	/*
	 * Unmask the interrupt to allow it to trigger again.  This only
	 * applies to certain types of interrupts (typically level
	 * interrupts).  This can result in the interrupt retriggering,
	 * but the retrigger will not be processed until we cycle our
	 * critical section.
	 *
	 * Only unmask interrupts while handlers are installed.  It is
	 * possible to hit a situation where no handlers are installed
	 * due to a device driver livelocking and then tearing down its
	 * interrupt on close (the parallel bus being a good example).
	 */
	if (intr < FIRST_SOFTINT && *list)
	    machintr_intr_enable(intr);

	/*
	 * Do a quick exit/enter to catch any higher-priority interrupt
	 * sources, such as the statclock, so thread time accounting
	 * will still work.  This may also cause an interrupt to re-trigger.
	 */
	crit_exit_gd(gd);
	crit_enter_gd(gd);

	/*
	 * LIVELOCK STATE MACHINE
	 */
	switch(info->i_state) {
	case ISTATE_NORMAL:
	    /*
	     * Reset the count each second.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		ill_count = 0;
	    }

	    /*
	     * If we did not exceed the frequency limit, we are done.
	     * If the interrupt has not retriggered we deschedule ourselves.
	     */
	    if (info->i_flags & INTR_HIFREQ)
		upper_limit = livelock_limit_hi;
	    else
		upper_limit = livelock_limit;
	    if (ill_count <= upper_limit) {
		if (info->i_running == 0) {
		    lwkt_deschedule_self(gd->gd_curthread);
		    lwkt_switch();
		}
		break;
	    }

	    /*
	     * Otherwise we are livelocked.  Set up a periodic systimer
	     * to wake the thread up at the limit frequency.
	     */
	    kprintf("intr %d on cpu%d at %d/%d hz, livelocked limit "
		    "engaged!\n", intr, cpuid, ill_count, upper_limit);
	    info->i_state = ISTATE_LIVELOCKED;
	    if ((use_limit = upper_limit) < 100)
		use_limit = 100;
	    else if (use_limit > 500000)
		use_limit = 500000;
	    systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
				      (void *)(intptr_t)intr, use_limit);
	    /* FALL THROUGH */
	case ISTATE_LIVELOCKED:
	    /*
	     * Wait for our periodic timer to go off.  Since the interrupt
	     * has re-armed it can still set i_running, but it will not
	     * reschedule us while we are in a livelocked state.
	     */
	    lwkt_deschedule_self(gd->gd_curthread);
	    lwkt_switch();

	    /*
	     * Check once a second to see if the livelock condition no
	     * longer applies.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		if (ill_count < livelock_lowater) {
		    info->i_state = ISTATE_NORMAL;
		    systimer_del(&ill_timer);
		    kprintf("intr %d on cpu%d at %d/%d hz, "
			    "livelock removed\n",
			    intr, cpuid, ill_count, livelock_lowater);
		} else if (livelock_debug == intr ||
			   (bootverbose && cold)) {
		    kprintf("intr %d on cpu%d at %d/%d hz, in livelock\n",
			    intr, cpuid, ill_count, livelock_lowater);
		}
		ill_count = 0;
	    }
	    break;
	}
    }
    /* NOT REACHED */
}

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING!  This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    globaldata_t gd = mycpu;
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr, cpuid = mycpuid;
    TD_INVARIANTS_DECLARE;

    get_mplock();
    crit_enter_gd(gd);
    TD_INVARIANTS_GET(gd->gd_curthread);

    for (;;) {
	for (intr = 0; intr < max_installed_hard_intr[cpuid]; ++intr) {
	    info = &intr_block->ary[cpuid][intr];
	    for (rec = info->i_reclist; rec; rec = nrec) {
		/* rec may be invalid after call */
		nrec = rec->next;
		if ((rec->intr_flags & INTR_NOPOLL) == 0) {
		    if (rec->serializer) {
			lwkt_serialize_handler_try(rec->serializer,
			    rec->handler, rec->argument, NULL);
		    } else {
			rec->handler(rec->argument, NULL);
		    }
		    TD_INVARIANTS_TEST(gd->gd_curthread, rec->name);
		}
	    }
	}
	lwkt_deschedule_self(gd->gd_curthread);
	lwkt_switch();
    }
    /* NOT REACHED */
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static void
emergency_intr_timer_callback(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
	lwkt_schedule(info->data);
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr, cpuid;
    char buf[64];

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
	    info = &intr_block->ary[cpuid][intr];

	    len = 0;
	    buf[0] = 0;
	    for (rec = info->i_reclist; rec; rec = rec->next) {
		ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
		    (len ? "/" : ""), rec->name);
		len += strlen(buf + len);
	    }
	    if (len == 0) {
		ksnprintf(buf, sizeof(buf), "irq%d", intr);
		len = strlen(buf);
	    }
	    error = SYSCTL_OUT(req, buf, len + 1);
	}
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt_all(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr, cpuid;

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	for (intr = 0; intr < MAX_INTS; ++intr) {
	    info = &intr_block->ary[cpuid][intr];

	    error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
	    if (error)
		goto failed;
	}
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt_all, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt_all, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int cpuid)
{
    int orig_cpuid = mycpuid;

    if (cpuid != orig_cpuid)
	lwkt_migratecpu(cpuid);

    *orig_cpuid0 = orig_cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
    if (cpuid != orig_cpuid)
	lwkt_migratecpu(orig_cpuid);
}

static void
intr_init(void *dummy __unused)
{
    int cpuid;

    kprintf("Initialize MI interrupts\n");

    intr_block = kmalloc(sizeof(*intr_block), M_INTRMNG,
	M_INTWAIT | M_ZERO);

    for (cpuid = 0; cpuid < ncpus; ++cpuid) {
	int intr;

	for (intr = 0; intr < MAX_INTS; ++intr) {
	    struct intr_info *info = &intr_block->ary[cpuid][intr];

	    info->i_cpuid = cpuid;
	    info->i_intr = intr;
	}
    }
}
SYSINIT(intr_init, SI_BOOT2_FINISH_PIC, SI_ORDER_ANY, intr_init, NULL);