/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.55 2008/09/01 12:49:00 sephe Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/machintr.h>

#include <machine/frame.h>
typedef struct intrec {
	struct intrec	*next;
	struct intr_info *info;
	inthand2_t	*handler;
	void		*argument;
	char		*name;
	int		intr;
	int		intr_flags;
	struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
	intrec_t	i_reclist;
	struct thread	i_thread;
	struct random_softc i_random;
	int		i_running;
	long		i_count;	/* interrupts dispatched */
	int		i_mplock_required;
	int		i_fast;
	int		i_slow;
	int		i_state;
	int		i_errorticks;
	unsigned long	i_straycount;
} intr_info_ary[MAX_INTS];
int max_installed_hard_intr;
int max_installed_soft_intr;
#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000
static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(int intr, struct intr_info *info);
static void int_moveto_destcpu(int *, int *, int);
static void int_moveto_origcpu(int, int);

#ifdef SMP
static void intr_get_mplock(void);
#endif
int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);
static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;
#define ISTATE_NOTHREAD		0
#define ISTATE_NORMAL		1
#define ISTATE_LIVELOCKED	2
static int intr_mpsafe = 1;
static int intr_migrate = 0;
static int intr_migrate_count;
TUNABLE_INT("kern.intr_mpsafe", &intr_mpsafe);
SYSCTL_INT(_kern, OID_AUTO, intr_mpsafe,
	CTLFLAG_RW, &intr_mpsafe, 0, "Run INTR_MPSAFE handlers without the BGL");
SYSCTL_INT(_kern, OID_AUTO, intr_migrate,
	CTLFLAG_RW, &intr_migrate, 0, "Migrate to cpu holding BGL");
SYSCTL_INT(_kern, OID_AUTO, intr_migrate_count,
	CTLFLAG_RW, &intr_migrate_count, 0, "");
static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
	CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
	CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");
static int emergency_intr_enable = 0;	/* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;	/* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
	0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");
/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
	int error, enabled;

	enabled = emergency_intr_enable;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;
	emergency_intr_enable = enabled;
	if (emergency_intr_enable) {
		systimer_adjust_periodic(&emergency_intr_timer,
					 emergency_intr_freq);
	} else {
		systimer_adjust_periodic(&emergency_intr_timer, 1);
	}
	return 0;
}
static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
	int error, phz;

	phz = emergency_intr_freq;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
		phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

	emergency_intr_freq = phz;
	if (emergency_intr_enable) {
		systimer_adjust_periodic(&emergency_intr_timer,
					 emergency_intr_freq);
	} else {
		systimer_adjust_periodic(&emergency_intr_timer, 1);
	}
	return 0;
}
/*
 * Register an SWI or INTerrupt handler.
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer)
{
	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("register_swi: bad intr %d", intr);
	return(register_int(intr, handler, arg, name, serializer, 0));
}
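
/*
 * Usage sketch (hypothetical caller): a subsystem typically registers its
 * software interrupt once at init time and keeps the opaque id for a later
 * unregister_swi():
 *
 *	void *id = register_swi(SOFTINT_X, mysub_swi, sc, "mysub", NULL);
 *
 * SOFTINT_X, mysub_swi and sc are placeholder names; a NULL serializer
 * means the handler performs its own synchronization.
 */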
void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int intr_flags)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int orig_cpuid, cpuid;

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_int: bad intr %d", intr);

	info = &intr_info_ary[intr];

	/*
	 * Construct an interrupt handler record
	 */
	rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
	rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
	strcpy(rec->name, name);

	rec->info = info;
	rec->handler = handler;
	rec->argument = arg;
	rec->intr = intr;
	rec->intr_flags = intr_flags;
	rec->next = NULL;
	rec->serializer = serializer;

	/*
	 * Create an emergency polling thread and set up a systimer to wake
	 * it up.
	 */
	if (emergency_intr_thread.td_kstack == NULL) {
		lwkt_create(ithread_emergency, NULL, NULL,
			    &emergency_intr_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
			    "ithread emerg");
		systimer_init_periodic_nq(&emergency_intr_timer,
			    emergency_intr_timer_callback, &emergency_intr_thread,
			    (emergency_intr_enable ? emergency_intr_freq : 1));
	}

	int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

	/*
	 * Create an interrupt thread if necessary, leave it in an unscheduled
	 * state.
	 */
	if (info->i_state == ISTATE_NOTHREAD) {
		info->i_state = ISTATE_NORMAL;
		lwkt_create((void *)ithread_handler, (void *)(intptr_t)intr, NULL,
			    &info->i_thread, TDF_STOPREQ|TDF_INTTHREAD|TDF_MPSAFE, -1,
			    "ithread %d", intr);
		if (intr >= FIRST_SOFTINT)
			lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
		else
			lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
		info->i_thread.td_preemptable = lwkt_preempt;
	}

	list = &info->i_reclist;

	/*
	 * Keep track of how many fast and slow interrupts we have.
	 * Set i_mplock_required if any handler in the chain requires
	 * the MP lock to operate.
	 */
	if ((intr_flags & INTR_MPSAFE) == 0)
		info->i_mplock_required = 1;
	if (intr_flags & INTR_FAST)
		++info->i_fast;
	else
		++info->i_slow;

	/*
	 * Enable random number generation keying off of this interrupt.
	 */
	if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
		info->i_random.sc_enabled = 1;
		info->i_random.sc_intr = intr;
	}

	/*
	 * Add the record to the interrupt list.
	 */
	crit_enter();
	while (*list != NULL)
		list = &(*list)->next;
	*list = rec;
	crit_exit();

	/*
	 * Update max_installed_hard_intr to make the emergency intr poll
	 * a bit more efficient.
	 */
	if (intr < FIRST_SOFTINT) {
		if (max_installed_hard_intr <= intr)
			max_installed_hard_intr = intr + 1;
	} else {
		if (max_installed_soft_intr <= intr)
			max_installed_soft_intr = intr + 1;
	}

	/*
	 * Setup the machine level interrupt vector
	 */
	if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1) {
		if (machintr_vector_setup(intr, intr_flags))
			kprintf("machintr_vector_setup: failed on irq %d\n", intr);
	}

	int_moveto_origcpu(orig_cpuid, cpuid);

	return(rec);
}
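
/*
 * Usage sketch (hypothetical driver): a hardware driver registers its
 * handler at attach time and tears it down symmetrically at detach,
 * using the returned opaque id:
 *
 *	sc->intr_id = register_int(irq, mydev_intr, sc, "mydev",
 *				   sc->serializer, INTR_MPSAFE);
 *	...
 *	unregister_int(sc->intr_id);
 *
 * mydev_intr, sc and the flag choice are illustrative; a handler that
 * still needs the BGL passes 0 for intr_flags instead.
 */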
void
unregister_swi(void *id)
{
	unregister_int(id);
}
void
unregister_int(void *id)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int intr, orig_cpuid, cpuid;

	intr = ((intrec_t)id)->intr;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_int: bad intr %d", intr);

	info = &intr_info_ary[intr];

	int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

	/*
	 * Remove the interrupt descriptor, adjust the descriptor count,
	 * and teardown the machine level vector if this was the last interrupt.
	 */
	crit_enter();
	list = &info->i_reclist;
	while ((rec = *list) != NULL) {
		if (rec == id)
			break;
		list = &rec->next;
	}
	if (rec) {
		intrec_t rec0;

		*list = rec->next;
		if (rec->intr_flags & INTR_FAST)
			--info->i_fast;
		else
			--info->i_slow;
		if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
			machintr_vector_teardown(intr);

		/*
		 * Clear i_mplock_required if no handlers in the chain require the
		 * MP lock.
		 */
		for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
			if ((rec0->intr_flags & INTR_MPSAFE) == 0)
				break;
		}
		if (rec0 == NULL)
			info->i_mplock_required = 0;
	}
	crit_exit();

	int_moveto_origcpu(orig_cpuid, cpuid);

	/*
	 * Free the record.
	 */
	if (rec != NULL) {
		kfree(rec->name, M_DEVBUF);
		kfree(rec, M_DEVBUF);
	} else {
		kprintf("warning: unregister_int: int %d handler for %s not found\n",
			intr, ((intrec_t)id)->name);
	}
}
const char *
get_registered_name(int intr)
{
	intrec_t rec;

	if (intr < 0 || intr >= MAX_INTS)
		panic("get_registered_name: bad intr %d", intr);

	if ((rec = intr_info_ary[intr].i_reclist) == NULL)
		return(NULL);
	else if (rec->next)
		return("mux");
	else
		return(rec->name);
}
int
count_registered_ints(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("count_registered_ints: bad intr %d", intr);
	info = &intr_info_ary[intr];
	return(info->i_fast + info->i_slow);
}
long
get_interrupt_counter(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("get_interrupt_counter: bad intr %d", intr);
	info = &intr_info_ary[intr];
	return(info->i_count);
}
void
swi_setpriority(int intr, int pri)
{
	struct intr_info *info;

	if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
		panic("swi_setpriority: bad intr %d", intr);
	info = &intr_info_ary[intr];
	if (info->i_state != ISTATE_NOTHREAD)
		lwkt_setpri(&info->i_thread, pri);
}
void
register_randintr(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_randintr: bad intr %d", intr);
	info = &intr_info_ary[intr];
	info->i_random.sc_intr = intr;
	info->i_random.sc_enabled = 1;
}
void
unregister_randintr(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("unregister_randintr: bad intr %d", intr);
	info = &intr_info_ary[intr];
	info->i_random.sc_enabled = -1;
}
int
next_registered_randintr(int intr)
{
	struct intr_info *info;

	if (intr < 0 || intr >= MAX_INTS)
		panic("next_registered_randintr: bad intr %d", intr);
	while (intr < MAX_INTS) {
		info = &intr_info_ary[intr];
		if (info->i_random.sc_enabled > 0)
			break;
		++intr;
	}
	return(intr);
}
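
/*
 * Scan sketch: callers enumerate the enabled entropy sources with a loop
 * of the following form (illustrative only):
 *
 *	for (intr = next_registered_randintr(0); intr < MAX_INTS;
 *	     intr = next_registered_randintr(intr + 1)) {
 *		... sample interrupt 'intr' ...
 *	}
 */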
/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
#ifdef SMP
static void
sched_ithd_remote(void *arg)
{
	sched_ithd((int)(intptr_t)arg);
}
#endif
void
sched_ithd(int intr)
{
	struct intr_info *info;

	info = &intr_info_ary[intr];

	++info->i_count;
	if (info->i_state != ISTATE_NOTHREAD) {
		if (info->i_reclist == NULL) {
			report_stray_interrupt(intr, info);
		} else {
#ifdef SMP
			if (info->i_thread.td_gd == mycpu) {
				if (info->i_running == 0) {
					info->i_running = 1;
					if (info->i_state != ISTATE_LIVELOCKED)
						lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
				}
			} else {
				lwkt_send_ipiq(info->i_thread.td_gd,
					       sched_ithd_remote, (void *)(intptr_t)intr);
			}
#else
			if (info->i_running == 0) {
				info->i_running = 1;
				if (info->i_state != ISTATE_LIVELOCKED)
					lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
			}
#endif
		}
	} else {
		report_stray_interrupt(intr, info);
	}
}
static void
report_stray_interrupt(int intr, struct intr_info *info)
{
	++info->i_straycount;
	if (info->i_straycount < 10) {
		if (info->i_errorticks == ticks)
			return;
		info->i_errorticks = ticks;
		kprintf("sched_ithd: stray interrupt %d on cpu %d\n",
			intr, mycpuid);
	} else if (info->i_straycount == 10) {
		kprintf("sched_ithd: %ld stray interrupts %d on cpu %d - "
			"there will be no further reports\n",
			info->i_straycount, intr, mycpuid);
	}
}
/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
	struct intr_info *info;

	info = &intr_info_ary[(int)(intptr_t)st->data];
	if (info->i_state != ISTATE_NOTHREAD)
		lwkt_schedule(&info->i_thread);
}
/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
	++td->td_nest_count;

	/*
	 * We are already in critical section, exit it now to
	 * allow preemption.
	 */
	crit_exit_quick(td);
	sched_ithd(intr);
	crit_enter_quick(td);

	--td->td_nest_count;
}
/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt)
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
	int intr;
	struct intr_info *info;
	struct intrec **list;
	int must_schedule;
#ifdef SMP
	int got_mplock;
#endif
	intrec_t rec, next_rec;
	globaldata_t gd;
	thread_t td;

	intr = frame->if_vec;
	gd = mycpu;
	td = curthread;

	/* We must be in critical section. */
	KKASSERT(td->td_pri >= TDPRI_CRIT);

	info = &intr_info_ary[intr];

	/*
	 * If we are not processing any FAST interrupts, just schedule the thing.
	 */
	if (info->i_fast == 0) {
		ithread_fast_sched(intr, td);
		return(1);
	}

	/*
	 * This should not normally occur since interrupts ought to be
	 * masked if the ithread has been scheduled or is running.
	 */
	if (info->i_running)
		return(1);

	/*
	 * Bump the interrupt nesting level to process any FAST interrupts.
	 * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
	 * schedule the interrupt thread to deal with the issue instead.
	 *
	 * To reduce overhead, just leave the MP lock held once it has been
	 * obtained.
	 */
	++gd->gd_intr_nesting_level;
	must_schedule = info->i_slow;
#ifdef SMP
	got_mplock = 0;
#endif

	list = &info->i_reclist;
	for (rec = *list; rec; rec = next_rec) {
		next_rec = rec->next;	/* rec may be invalid after call */

		if (rec->intr_flags & INTR_FAST) {
#ifdef SMP
			if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
				if (try_mplock() == 0) {
					/* Couldn't get the MP lock; just schedule it. */
					must_schedule = 1;
					break;
				}
				got_mplock = 1;
			}
#endif
			if (rec->serializer) {
				must_schedule += lwkt_serialize_handler_try(
						rec->serializer, rec->handler,
						rec->argument, frame);
			} else {
				rec->handler(rec->argument, frame);
			}
		}
	}

	/*
	 * Cleanup
	 */
	--gd->gd_intr_nesting_level;
#ifdef SMP
	if (got_mplock)
		rel_mplock();
#endif

	/*
	 * If we had a problem, or mixed fast and slow interrupt handlers are
	 * registered, schedule the ithread to catch the missed records (it
	 * will just re-run all of them).  A return value of 0 indicates that
	 * all handlers have been run and the interrupt can be re-enabled, and
	 * a non-zero return indicates that the interrupt thread controls
	 * re-enablement.
	 */
	if (must_schedule > 0)
		ithread_fast_sched(intr, td);
	else if (must_schedule == 0)
		++info->i_count;
	return(must_schedule);
}
/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */
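
/*
 * Worked example: with a limit frequency of 40000 hz,
 * LIVELOCK_TIMEFRAME(40000) evaluates to 10000, i.e. roughly a quarter
 * second's worth of interrupts at that rate.
 */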
static void
ithread_handler(void *arg)
{
	struct intr_info *info;
	int use_limit;
	__uint32_t lseconds;
	int intr;
	int mpheld;
	struct intrec **list;
	intrec_t rec, nrec;
	globaldata_t gd;
	struct systimer ill_timer;	/* enforced freq. timer */
	u_int ill_count;		/* interrupt livelock counter */

	ill_count = 0;
	intr = (int)(intptr_t)arg;
	info = &intr_info_ary[intr];
	list = &info->i_reclist;

	/*
	 * The loop must be entered with one critical section held.  The thread
	 * is created with TDF_MPSAFE so the MP lock is not held on start.
	 */
	gd = mycpu;
	lseconds = gd->gd_time_seconds;
	crit_enter_gd(gd);
	mpheld = 0;

	for (;;) {
		/*
		 * The chain is only considered MPSAFE if all its interrupt handlers
		 * are MPSAFE.  However, if intr_mpsafe has been turned off we
		 * always operate with the BGL.
		 */
#ifdef SMP
		if (intr_mpsafe == 0) {
			if (mpheld == 0) {
				intr_get_mplock();
				mpheld = 1;
			}
		} else if (info->i_mplock_required != mpheld) {
			if (info->i_mplock_required) {
				KKASSERT(mpheld == 0);
				intr_get_mplock();
				mpheld = 1;
			} else {
				KKASSERT(mpheld != 0);
				rel_mplock();
				mpheld = 0;
			}
		}

		/*
		 * scheduled cpu may have changed, see intr_get_mplock()
		 */
		gd = mycpu;
#endif

		/*
		 * If an interrupt is pending, clear i_running and execute the
		 * handlers.  Note that certain types of interrupts can re-trigger
		 * and set i_running again.
		 *
		 * Each handler is run in a critical section.  Note that we run both
		 * FAST and SLOW designated service routines.
		 */
		if (info->i_running) {
			++ill_count;
			info->i_running = 0;
			if (*list == NULL)
				report_stray_interrupt(intr, info);

			for (rec = *list; rec; rec = nrec) {
				nrec = rec->next;
				if (rec->serializer) {
					lwkt_serialize_handler_call(rec->serializer, rec->handler,
								    rec->argument, NULL);
				} else {
					rec->handler(rec->argument, NULL);
				}
			}
		}

		/*
		 * This is our interrupt hook to add rate randomness to the random
		 * number generator.
		 */
		if (info->i_random.sc_enabled > 0)
			add_interrupt_randomness(intr);

		/*
		 * Unmask the interrupt to allow it to trigger again.  This only
		 * applies to certain types of interrupts (typ level interrupts).
		 * This can result in the interrupt retriggering, but the retrigger
		 * will not be processed until we cycle our critical section.
		 *
		 * Only unmask interrupts while handlers are installed.  It is
		 * possible to hit a situation where no handlers are installed
		 * due to a device driver livelocking and then tearing down its
		 * interrupt on close (the parallel bus being a good example).
		 */
		if (*list)
			machintr_intren(intr);

		/*
		 * Do a quick exit/enter to catch any higher-priority interrupt
		 * sources, such as the statclock, so thread time accounting
		 * will still work.  This may also cause an interrupt to re-trigger.
		 */
		crit_exit_gd(gd);
		crit_enter_gd(gd);

		/*
		 * LIVELOCK STATE MACHINE
		 */
		switch(info->i_state) {
		case ISTATE_NORMAL:
			/*
			 * Reset the count each second.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				ill_count = 0;
			}

			/*
			 * If we did not exceed the frequency limit, we are done.
			 * If the interrupt has not retriggered we deschedule ourselves.
			 */
			if (ill_count <= livelock_limit) {
				if (info->i_running == 0) {
#ifdef SMP
					if (mpheld && intr_migrate) {
						rel_mplock();
						mpheld = 0;
					}
#endif
					lwkt_deschedule_self(gd->gd_curthread);
					lwkt_switch();
				}
				break;
			}

			/*
			 * Otherwise we are livelocked.  Set up a periodic systimer
			 * to wake the thread up at the limit frequency.
			 */
			kprintf("intr %d at %d/%d hz, livelocked limit engaged!\n",
				intr, ill_count, livelock_limit);
			info->i_state = ISTATE_LIVELOCKED;
			if ((use_limit = livelock_limit) < 100)
				use_limit = 100;
			else if (use_limit > 500000)
				use_limit = 500000;
			systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
						  (void *)(intptr_t)intr, use_limit);
			/* FALL THROUGH */
		case ISTATE_LIVELOCKED:
			/*
			 * Wait for our periodic timer to go off.  Since the interrupt
			 * has re-armed it can still set i_running, but it will not
			 * reschedule us while we are in a livelocked state.
			 */
			lwkt_deschedule_self(gd->gd_curthread);
			lwkt_switch();

			/*
			 * Check once a second to see if the livelock condition no
			 * longer applies.
			 */
			if (lseconds != gd->gd_time_seconds) {
				lseconds = gd->gd_time_seconds;
				if (ill_count < livelock_lowater) {
					info->i_state = ISTATE_NORMAL;
					systimer_del(&ill_timer);
					kprintf("intr %d at %d/%d hz, livelock removed\n",
						intr, ill_count, livelock_lowater);
				} else if (livelock_debug == intr ||
					   (bootverbose && cold)) {
					kprintf("intr %d at %d/%d hz, in livelock\n",
						intr, ill_count, livelock_lowater);
				}
				ill_count = 0;
			}
			break;
		}
	}
	/* NOT REACHED */
}
#ifdef SMP
/*
 * An interrupt thread is trying to get the MP lock.  To avoid cpu-bound
 * code in the kernel on cpu X from interfering we chase the MP lock.
 */
static void
intr_get_mplock(void)
{
	int owner;

	if (intr_migrate == 0) {
		get_mplock();
		return;
	}
	while (try_mplock() == 0) {
		owner = owner_mplock();
		if (owner >= 0 && owner != mycpu->gd_cpuid) {
			lwkt_migratecpu(owner);
			++intr_migrate_count;
		}
	}
}
#endif
/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
	struct intr_info *info;
	intrec_t rec, nrec;
	int intr;

	for (;;) {
		for (intr = 0; intr < max_installed_hard_intr; ++intr) {
			info = &intr_info_ary[intr];
			for (rec = info->i_reclist; rec; rec = nrec) {
				nrec = rec->next;
				if ((rec->intr_flags & INTR_NOPOLL) == 0) {
					if (rec->serializer) {
						lwkt_serialize_handler_call(rec->serializer,
							rec->handler, rec->argument, NULL);
					} else {
						rec->handler(rec->argument, NULL);
					}
				}
			}
		}
		lwkt_deschedule_self(curthread);
		lwkt_switch();
	}
}
/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
 */
static void
emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
{
	if (emergency_intr_enable)
		lwkt_schedule(info->data);
}
int
ithread_cpuid(int intr)
{
	const struct intr_info *info;

	KKASSERT(intr >= 0 && intr < MAX_INTS);
	info = &intr_info_ary[intr];

	if (info->i_state == ISTATE_NOTHREAD)
		return -1;
	return info->i_thread.td_gd->gd_cpuid;
}
/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	struct intr_info *info;
	intrec_t rec;
	int error = 0;
	int len;
	int intr;
	char buf[64];

	for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
		info = &intr_info_ary[intr];

		len = 0;
		buf[0] = 0;
		for (rec = info->i_reclist; rec; rec = rec->next) {
			ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
				  (len ? "/" : ""), rec->name);
			len += strlen(buf + len);
		}
		if (len == 0) {
			ksnprintf(buf, sizeof(buf), "irq%d", intr);
			len = strlen(buf);
		}
		error = SYSCTL_OUT(req, buf, len + 1);
	}
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");
static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	struct intr_info *info;
	int error = 0;
	int intr;

	for (intr = 0; intr < max_installed_hard_intr; ++intr) {
		info = &intr_info_ary[intr];

		error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
		if (error)
			goto failed;
	}
	for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
		info = &intr_info_ary[intr];

		error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
		if (error)
			goto failed;
	}
failed:
	return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
static void
int_moveto_destcpu(int *orig_cpuid0, int *cpuid0, int intr)
{
	int orig_cpuid = mycpuid, cpuid;
	char envpath[32];

	cpuid = orig_cpuid;
	ksnprintf(envpath, sizeof(envpath), "hw.irq.%d.dest", intr);
	kgetenv_int(envpath, &cpuid);
	if (cpuid >= ncpus)
		cpuid = orig_cpuid;

	if (cpuid != orig_cpuid)
		lwkt_migratecpu(cpuid);

	*orig_cpuid0 = orig_cpuid;
	*cpuid0 = cpuid;
}
static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
	if (cpuid != orig_cpuid)
		lwkt_migratecpu(orig_cpuid);
}