/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.55 2008/09/01 12:49:00 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/machintr.h>

#include <machine/frame.h>

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t	*handler;
    void	*argument;
    char	*name;
    int		intr;
    int		intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t	i_reclist;
    struct thread i_thread;
    struct random_softc i_random;
    int		i_running;
    long	i_count;	/* interrupts dispatched */
    int		i_mplock_required;
    int		i_fast;
    int		i_slow;
    int		i_state;
    int		i_errorticks;
    unsigned long i_straycount;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(int intr, struct intr_info *info);
static void int_moveto_destcpu(int *, int *, int);
static void int_moveto_origcpu(int, int);

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2

static int intr_mpsafe = 1;
TUNABLE_INT("kern.intr_mpsafe", &intr_mpsafe);
SYSCTL_INT(_kern, OID_AUTO, intr_mpsafe,
	CTLFLAG_RW, &intr_mpsafe, 0, "Run INTR_MPSAFE handlers without the BGL");

static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
	CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
	CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error;
    int enabled;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
	return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable) {
	systimer_adjust_periodic(&emergency_intr_timer,
				 emergency_intr_freq);
    } else {
	systimer_adjust_periodic(&emergency_intr_timer, 1);
    }
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error;
    int phz;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
	return error;
    if (phz <= 0)
	return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
	phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable) {
	systimer_adjust_periodic(&emergency_intr_timer,
				 emergency_intr_freq);
    } else {
	systimer_adjust_periodic(&emergency_intr_timer, 1);
    }
    return 0;
}
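
/*
 * Example (administrative usage; illustrative only, not code): the
 * emergency polling machinery above is driven by the tunables/sysctls
 * declared earlier in this file, e.g.
 *
 *	kern.emergency_intr_enable=1	(set in /boot/loader.conf)
 *	sysctl kern.emergency_intr_freq=100
 *
 * Note that the systimer keeps ticking at 1 hz while polling is
 * disabled, so enabling it later only requires adjusting the period.
 */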

/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, serializer, 0));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int intr_flags)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int orig_cpuid, cpuid;

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.
     */
    if (emergency_intr_thread.td_kstack == NULL) {
	lwkt_create(ithread_emergency, NULL, NULL,
		    &emergency_intr_thread, TDF_STOPREQ | TDF_INTTHREAD, -1,
		    "ithread emerg");
	systimer_init_periodic_nq(&emergency_intr_timer,
		    emergency_intr_timer_callback, &emergency_intr_thread,
		    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
	info->i_state = ISTATE_NORMAL;
	lwkt_create((void *)ithread_handler, (void *)(intptr_t)intr, NULL,
		    &info->i_thread, TDF_STOPREQ | TDF_INTTHREAD | TDF_MPSAFE,
		    -1, "ithread %d", intr);
	if (intr >= FIRST_SOFTINT)
	    lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
	else
	    lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
	info->i_thread.td_preemptable = lwkt_preempt;
    }

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0)
	info->i_mplock_required = 1;
    if (intr_flags & INTR_FAST)
	++info->i_fast;
    else
	++info->i_slow;

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
	info->i_random.sc_enabled = 1;
	info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
	list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
	if (max_installed_hard_intr <= intr)
	    max_installed_hard_intr = intr + 1;
    } else {
	if (max_installed_soft_intr <= intr)
	    max_installed_soft_intr = intr + 1;
    }

    /*
     * Setup the machine level interrupt vector
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1) {
	if (machintr_vector_setup(intr, intr_flags))
	    kprintf("machintr_vector_setup: failed on irq %d\n", intr);
    }

    int_moveto_origcpu(orig_cpuid, cpuid);

    return(rec);
}
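
/*
 * Illustrative sketch of driver-side usage (hypothetical device, softc
 * and irq number; only register_int()/unregister_int() and the INTR_*
 * flags come from this file):
 */
#if 0
static void
mydev_intr(void *arg, void *frame)
{
	struct mydev_softc *sc = arg;	/* hypothetical softc */

	/* acknowledge the device and service the interrupt here */
}

static void
mydev_attach_intr(struct mydev_softc *sc, int irq)
{
	/* MPSAFE handler, no serializer */
	sc->mydev_intr_id = register_int(irq, mydev_intr, sc, "mydev",
					 NULL, INTR_MPSAFE);
}

static void
mydev_detach_intr(struct mydev_softc *sc)
{
	unregister_int(sc->mydev_intr_id);
}
#endif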

void
unregister_swi(void *id)
{
    unregister_int(id);
}

void
unregister_int(void *id)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec, rec0;
    int intr, orig_cpuid, cpuid;

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and teardown the machine level vector if this was the last interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
	if (rec == id)
	    break;
	list = &rec->next;
    }
    if (rec) {
	*list = rec->next;
	if (rec->intr_flags & INTR_FAST)
	    --info->i_fast;
	else
	    --info->i_slow;
	if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
	    machintr_vector_teardown(intr);

	/*
	 * Clear i_mplock_required if no handlers in the chain require the
	 * MP lock.
	 */
	for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
	    if ((rec0->intr_flags & INTR_MPSAFE) == 0)
		break;
	}
	if (rec0 == NULL)
	    info->i_mplock_required = 0;
    }
    crit_exit();

    int_moveto_origcpu(orig_cpuid, cpuid);

    /*
     * Free the record.
     */
    if (rec != NULL) {
	kfree(rec->name, M_DEVBUF);
	kfree(rec, M_DEVBUF);
    } else {
	kprintf("warning: unregister_int: int %d handler for %s not found\n",
		intr, ((intrec_t)id)->name);
    }
}

const char *
get_registered_name(int intr)
{
    intrec_t rec;

    if (intr < 0 || intr >= MAX_INTS)
	panic("get_registered_name: bad intr %d", intr);

    if ((rec = intr_info_ary[intr].i_reclist) == NULL)
	return(NULL);
    else if (rec->next)
	return("mux");
    else
	return(rec->name);
}

int
count_registered_ints(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("count_registered_ints: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_count);
}
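
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * i_count only ever increases, so sampling get_interrupt_counter()
 * twice yields an approximate interrupt rate.
 */
#if 0
static long
sample_intr_rate(int intr, int delay_ticks)
{
	long count0, count1;

	count0 = get_interrupt_counter(intr);
	tsleep(&count0, 0, "irate", delay_ticks);
	count1 = get_interrupt_counter(intr);
	return ((count1 - count0) * hz / delay_ticks);	/* intr per second */
}
#endif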

void
swi_setpriority(int intr, int pri)
{
    struct intr_info *info;

    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
	panic("swi_setpriority: bad intr %d", intr);
    info = &intr_info_ary[intr];
    if (info->i_state != ISTATE_NOTHREAD)
	lwkt_setpri(&info->i_thread, pri);
}

void
register_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_intr = intr;
    info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("unregister_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_enabled = -1;
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
	panic("next_registered_randintr: bad intr %d", intr);
    while (intr < MAX_INTS) {
	info = &intr_info_ary[intr];
	if (info->i_random.sc_enabled > 0)
	    break;
	++intr;
    }
    return(intr);
}
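
/*
 * Illustrative sketch: next_registered_randintr(intr) returns the first
 * interrupt >= intr with entropy collection enabled, or MAX_INTS when
 * there is none, so all entropy sources can be walked like this
 * (hypothetical helper):
 */
#if 0
static void
dump_randintrs(void)
{
	int intr;

	for (intr = next_registered_randintr(0); intr < MAX_INTS;
	     intr = next_registered_randintr(intr + 1)) {
		kprintf("entropy source: irq %d\n", intr);
	}
}
#endif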

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP
static void
sched_ithd_remote(void *arg)
{
    sched_ithd((int)(intptr_t)arg);
}
#endif

void
sched_ithd(int intr)
{
    struct intr_info *info;

    info = &intr_info_ary[intr];

    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
	if (info->i_reclist == NULL) {
	    report_stray_interrupt(intr, info);
	} else {
#ifdef SMP
	    if (info->i_thread.td_gd == mycpu) {
		if (info->i_running == 0) {
		    info->i_running = 1;
		    if (info->i_state != ISTATE_LIVELOCKED)
			lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
		}
	    } else {
		lwkt_send_ipiq(info->i_thread.td_gd,
			       sched_ithd_remote, (void *)(intptr_t)intr);
	    }
#else
	    if (info->i_running == 0) {
		info->i_running = 1;
		if (info->i_state != ISTATE_LIVELOCKED)
		    lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
	    }
#endif
	}
    } else {
	report_stray_interrupt(intr, info);
    }
}

static void
report_stray_interrupt(int intr, struct intr_info *info)
{
    ++info->i_straycount;
    if (info->i_straycount < 10) {
	if (info->i_errorticks == ticks)
	    return;
	info->i_errorticks = ticks;
	kprintf("sched_ithd: stray interrupt %d on cpu %d\n",
		intr, mycpuid);
    } else if (info->i_straycount == 10) {
	kprintf("sched_ithd: %ld stray interrupts %d on cpu %d - "
		"there will be no further reports\n",
		info->i_straycount, intr, mycpuid);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
    struct intr_info *info;

    info = &intr_info_ary[(int)(intptr_t)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
	lwkt_schedule(&info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
    /*
     * We are already in critical section, exit it now to
     * allow preemption.
     */
    crit_exit_quick(td);
    sched_ithd(intr);
    crit_enter_quick(td);
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
#ifdef SMP
    int got_mplock;
#endif
    intrec_t rec, next_rec;
    globaldata_t gd;
    thread_t td;

    intr = frame->if_vec;
    gd = mycpu;
    td = curthread;

    /* We must be in critical section. */
    KKASSERT(td->td_pri >= TDPRI_CRIT);

    info = &intr_info_ary[intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
     */
    if (info->i_fast == 0) {
	ithread_fast_sched(intr, td);
	return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
	return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    ++gd->gd_intr_nesting_level;
    must_schedule = info->i_slow;
#ifdef SMP
    got_mplock = 0;
#endif

    list = &info->i_reclist;
    for (rec = *list; rec; rec = next_rec) {
	next_rec = rec->next;	/* rec may be invalid after call */

	if (rec->intr_flags & INTR_FAST) {
#ifdef SMP
	    if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
		if (try_mplock() == 0) {
		    /* Couldn't get the MP lock; just schedule it. */
		    must_schedule = 1;
		    break;
		}
		got_mplock = 1;
	    }
#endif
	    if (rec->serializer) {
		must_schedule += lwkt_serialize_handler_try(
				    rec->serializer, rec->handler,
				    rec->argument, frame);
	    } else {
		rec->handler(rec->argument, frame);
	    }
	}
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
#ifdef SMP
    if (got_mplock)
	rel_mplock();
#endif

    /*
     * If we had a problem, or mixed fast and slow interrupt handlers are
     * registered, schedule the ithread to catch the missed records (it
     * will just re-run all of them).  A return value of 0 indicates that
     * all handlers have been run and the interrupt can be re-enabled, and
     * a non-zero return indicates that the interrupt thread controls
     * re-enablement.
     */
    if (must_schedule > 0)
	ithread_fast_sched(intr, td);
    else if (must_schedule == 0)
	++info->i_count;
    return(must_schedule);
}
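
/*
 * Note on handler invocation (the handler below is hypothetical): a FAST
 * handler receives the trap frame when dispatched from
 * ithread_fast_handler() above, but the same record is re-run with a
 * NULL frame from ithread_handler() and ithread_emergency(), so a
 * handler must tolerate both cases.
 */
#if 0
static void
mydev_fast_intr(void *arg, void *frame)
{
	/* frame != NULL: called inline from ithread_fast_handler() */
	/* frame == NULL: called from the ithread or emergency poller */
}
#endif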

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    u_int lseconds;
    int intr;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;	/* enforced freq. timer */
    u_int ill_count;		/* interrupt livelock counter */

    ill_count = 0;
    intr = (int)(intptr_t)arg;
    info = &intr_info_ary[intr];
    list = &info->i_reclist;
    gd = mycpu;
    lseconds = gd->gd_time_seconds;

    /*
     * The loop must be entered with one critical section held.  The thread
     * is created with TDF_MPSAFE so the MP lock is not held on start.
     */
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
	/*
	 * The chain is only considered MPSAFE if all its interrupt handlers
	 * are MPSAFE.  However, if intr_mpsafe has been turned off we
	 * always operate with the BGL.
	 */
#ifdef SMP
	if (intr_mpsafe == 0) {
	    if (mpheld == 0) {
		get_mplock();
		mpheld = 1;
	    }
	} else if (info->i_mplock_required != mpheld) {
	    if (info->i_mplock_required) {
		KKASSERT(mpheld == 0);
		get_mplock();
		mpheld = 1;
	    } else {
		KKASSERT(mpheld != 0);
		rel_mplock();
		mpheld = 0;
	    }
	}
#endif

	/*
	 * If an interrupt is pending, clear i_running and execute the
	 * handlers.  Note that certain types of interrupts can re-trigger
	 * and set i_running again.
	 *
	 * Each handler is run in a critical section.  Note that we run both
	 * FAST and SLOW designated service routines.
	 */
	if (info->i_running) {
	    ++ill_count;
	    info->i_running = 0;

	    if (*list == NULL)
		report_stray_interrupt(intr, info);

	    for (rec = *list; rec; rec = nrec) {
		nrec = rec->next;
		if (rec->serializer) {
		    lwkt_serialize_handler_call(rec->serializer, rec->handler,
						rec->argument, NULL);
		} else {
		    rec->handler(rec->argument, NULL);
		}
	    }
	}

	/*
	 * This is our interrupt hook to add rate randomness to the random
	 * number generator.
	 */
	if (info->i_random.sc_enabled > 0)
	    add_interrupt_randomness(intr);

	/*
	 * Unmask the interrupt to allow it to trigger again.  This only
	 * applies to certain types of interrupts (typ level interrupts).
	 * This can result in the interrupt retriggering, but the retrigger
	 * will not be processed until we cycle our critical section.
	 *
	 * Only unmask interrupts while handlers are installed.  It is
	 * possible to hit a situation where no handlers are installed
	 * due to a device driver livelocking and then tearing down its
	 * interrupt on close (the parallel bus being a good example).
	 */
	if (*list)
	    machintr_intren(intr);

	/*
	 * Do a quick exit/enter to catch any higher-priority interrupt
	 * sources, such as the statclock, so thread time accounting
	 * will still work.  This may also cause an interrupt to re-trigger.
	 */
	crit_exit_gd(gd);
	crit_enter_gd(gd);

	/*
	 * LIVELOCK STATE MACHINE
	 */
	switch(info->i_state) {
	case ISTATE_NORMAL:
	    /*
	     * Reset the count each second.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		ill_count = 0;
	    }

	    /*
	     * If we did not exceed the frequency limit, we are done.
	     * If the interrupt has not retriggered we deschedule ourselves.
	     */
	    if (ill_count <= livelock_limit) {
		if (info->i_running == 0) {
		    lwkt_deschedule_self(gd->gd_curthread);
		    lwkt_switch();
		}
		break;
	    }

	    /*
	     * Otherwise we are livelocked.  Set up a periodic systimer
	     * to wake the thread up at the limit frequency.
	     */
	    kprintf("intr %d at %d/%d hz, livelocked limit engaged!\n",
		    intr, ill_count, livelock_limit);
	    info->i_state = ISTATE_LIVELOCKED;
	    if ((use_limit = livelock_limit) < 100)
		use_limit = 100;
	    else if (use_limit > 500000)
		use_limit = 500000;
	    systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
				      (void *)(intptr_t)intr, use_limit);
	    /* FALL THROUGH */
	case ISTATE_LIVELOCKED:
	    /*
	     * Wait for our periodic timer to go off.  Since the interrupt
	     * has re-armed it can still set i_running, but it will not
	     * reschedule us while we are in a livelocked state.
	     */
	    lwkt_deschedule_self(gd->gd_curthread);
	    lwkt_switch();

	    /*
	     * Check once a second to see if the livelock condition no
	     * longer applies.
	     */
	    if (lseconds != gd->gd_time_seconds) {
		lseconds = gd->gd_time_seconds;
		if (ill_count < livelock_lowater) {
		    info->i_state = ISTATE_NORMAL;
		    systimer_del(&ill_timer);
		    kprintf("intr %d at %d/%d hz, livelock removed\n",
			    intr, ill_count, livelock_lowater);
		} else if (livelock_debug == intr ||
			   (bootverbose && cold)) {
		    kprintf("intr %d at %d/%d hz, in livelock\n",
			    intr, ill_count, livelock_lowater);
		}
		ill_count = 0;
	    }
	    break;
	}
    }
    /* NOT REACHED */
}
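
/*
 * Example (illustrative, using the defaults declared above): an interrupt
 * exceeding kern.livelock_limit (40000/sec) moves to ISTATE_LIVELOCKED
 * and is throttled to the limit frequency by the systimer; it returns to
 * ISTATE_NORMAL only once a full second elapses with fewer than
 * kern.livelock_lowater (20000) interrupts.
 */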

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr;

    for (;;) {
	for (intr = 0; intr < max_installed_hard_intr; ++intr) {
	    info = &intr_info_ary[intr];
	    for (rec = info->i_reclist; rec; rec = nrec) {
		nrec = rec->next;
		if ((rec->intr_flags & INTR_NOPOLL) == 0) {
		    if (rec->serializer) {
			lwkt_serialize_handler_call(rec->serializer,
					rec->handler, rec->argument, NULL);
		    } else {
			rec->handler(rec->argument, NULL);
		    }
		}
	    }
	}
	lwkt_deschedule_self(curthread);
	lwkt_switch();
    }
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static void
emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
	lwkt_schedule(info->data);
}

int
ithread_cpuid(int intr)
{
    const struct intr_info *info;

    KKASSERT(intr >= 0 && intr < MAX_INTS);
    info = &intr_info_ary[intr];

    if (info->i_state == ISTATE_NOTHREAD)
	return -1;
    return info->i_thread.td_gd->gd_cpuid;
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr;
    char buf[64];

    for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
	info = &intr_info_ary[intr];

	len = 0;
	buf[0] = 0;
	for (rec = info->i_reclist; rec; rec = rec->next) {
	    ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
		      (len ? "/" : ""), rec->name);
	    len += strlen(buf + len);
	}
	if (len == 0) {
	    ksnprintf(buf, sizeof(buf), "irq%d", intr);
	    len = strlen(buf);
	}
	error = SYSCTL_OUT(req, buf, len + 1);
    }
    return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr;

    for (intr = 0; intr < max_installed_hard_intr; ++intr) {
	info = &intr_info_ary[intr];

	error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
	if (error)
	    goto failed;
    }
    for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
	info = &intr_info_ary[intr];

	error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
	if (error)
	    goto failed;
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

static void
int_moveto_destcpu(int *orig_cpuid0, int *cpuid0, int intr)
{
    int orig_cpuid = mycpuid, cpuid;
    char envpath[32];

    cpuid = orig_cpuid;
    ksnprintf(envpath, sizeof(envpath), "hw.irq.%d.dest", intr);
    kgetenv_int(envpath, &cpuid);

    /* Sanitize the cpu requested by the tunable. */
    if (cpuid < 0 || cpuid >= ncpus)
	cpuid = orig_cpuid;

    if (cpuid != orig_cpuid)
	lwkt_migratecpu(cpuid);

    *orig_cpuid0 = orig_cpuid;
    *cpuid0 = cpuid;
}

static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
    if (cpuid != orig_cpuid)
	lwkt_migratecpu(orig_cpuid);
}
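
/*
 * Example (administrative usage): the "hw.irq.%d.dest" tunable consulted
 * above allows an interrupt to be bound to a particular cpu from
 * /boot/loader.conf, e.g.
 *
 *	hw.irq.11.dest=1
 *
 * which makes irq 11 registration/teardown (and hence its ithread) take
 * place on cpu 1.
 */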