/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	This file contains the code used by various IRQ handling routines:
 *	asking for different IRQ's should be done through these routines
 *	instead of just grabbing them. Thus setups with different IRQ numbers
 *	shouldn't result in any weird surprises, and installing new handlers
 *	should be easier.
 */

/*
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/tasks.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/delay.h>

#include "irq.h"
/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design? Do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques;
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() correlates with the 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
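/*
 * SYNC_OTHER_CORES() is used in wait_on_irq() below, inside an
 * __sti()/__cli() window, so that other CPUs get a chance both to
 * observe the released lock bit and to deliver pending interrupts
 * to this CPU.
 */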
unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];

atomic_t nmi_counter;
/*
 * About the IO-APIC: its architecture is 'merged' into our
 * current irq architecture, seamlessly. (I hope.) It is only
 * visible through 8 more hardware interrupt lines, but otherwise
 * drivers are unaffected. The main code is believed to be
 * NR_IRQS-safe (nothing assumes we have only 16 irq lines
 * anymore), but there might be some places left ...
 */
/*
 * This contains the irq mask for both 8259A irq controllers,
 * and on SMP the extended IO-APIC IRQs 16-23. The IO-APIC
 * uses this mask too, in probe_irq*().
 *
 * (0x0000ffff for NR_IRQS==16, 0x00ffffff for NR_IRQS==24)
 */
static unsigned int cached_irq_mask = (1<<NR_IRQS)-1;

#define cached_21 ((cached_irq_mask | io_apic_irqs) & 0xff)
#define cached_A1 (((cached_irq_mask | io_apic_irqs) >> 8) & 0xff)
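/*
 * Example: masking IRQ3 sets bit 3 in cached_irq_mask, so cached_21
 * (the low byte, written to the master 8259A at port 0x21) gets bit 3
 * set, while cached_A1 (bits 8-15, written to the slave at port 0xA1)
 * is unaffected. ORing in io_apic_irqs keeps every IO-APIC routed irq
 * permanently masked in the 8259As.
 */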
spinlock_t irq_controller_lock;

static int irq_events [NR_IRQS] = { -1, };
static int disabled_irq [NR_IRQS] = { 0, };
#ifdef __SMP__
static int irq_owner [NR_IRQS] = { NO_PROC_ID, };
#endif
/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt and sometimes the keyboard interrupt are
 * not connected to any IO-APIC pin; they are fed to the CPU ExtInt IRQ line
 * directly.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs us one more branch in do_IRQ,
 * but we get _much_ higher compatibility and robustness this way.
 */
#ifndef __SMP__
static const unsigned int io_apic_irqs = 0;
#else
/*
 * Default to all normal IRQ's _not_ using the IO-APIC.
 *
 * To get IO-APIC interrupts you should either:
 *  - turn some of them into IO-APIC interrupts at runtime
 *    with some magic system call interface, or
 *  - explicitly use irqs 16-19, depending on which PCI irq
 *    line your PCI controller uses.
 */
unsigned int io_apic_irqs = 0xff0000;
#endif
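/*
 * IO_APIC_IRQ() (defined in irq.h) tests an irq number against this
 * mask; with the default of 0xff0000 only irqs 16-23 are handled by
 * the IO-APIC, everything else stays on the 8259A path.
 */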
static inline void mask_8259A(int irq)
{
        cached_irq_mask |= 1 << irq;
        if (irq & 8) {
                outb(cached_A1,0xA1);
        } else {
                outb(cached_21,0x21);
        }
}

static inline void unmask_8259A(int irq)
{
        cached_irq_mask &= ~(1 << irq);
        if (irq & 8) {
                outb(cached_A1,0xA1);
        } else {
                outb(cached_21,0x21);
        }
}

void set_8259A_irq_mask(int irq)
{
        /*
         * (it might happen that we see IRQ>15 on a UP box, with SMP
         * emulation)
         */
        if (irq < 16) {
                if (irq & 8) {
                        outb(cached_A1,0xA1);
                } else {
                        outb(cached_21,0x21);
                }
        }
}
/*
 * These have to be protected by the irq controller spinlock
 * before being called.
 */
void mask_irq(unsigned int irq)
{
        if (IO_APIC_IRQ(irq))
                disable_IO_APIC_irq(irq);
        else {
                cached_irq_mask |= 1 << irq;
                set_8259A_irq_mask(irq);
        }
}

void unmask_irq(unsigned int irq)
{
        if (IO_APIC_IRQ(irq))
                enable_IO_APIC_irq(irq);
        else {
                cached_irq_mask &= ~(1 << irq);
                set_8259A_irq_mask(irq);
        }
}
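/*
 * Caller pattern (see disable_irq()/enable_irq() below):
 *
 *      spin_lock_irqsave(&irq_controller_lock, flags);
 *      mask_irq(irq);
 *      spin_unlock_irqrestore(&irq_controller_lock, flags);
 */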
/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that save
 * register context and call do_IRQ(). do_IRQ() then does all the
 * operations that are needed to keep the AT (or SMP IOAPIC)
 * interrupt-controller happy.
 */
BUILD_COMMON_IRQ()
/*
 * ISA PIC or IO-APIC triggered (INTA-cycle or APIC) interrupts:
 */
BUILD_IRQ(0) BUILD_IRQ(1) BUILD_IRQ(2) BUILD_IRQ(3)
BUILD_IRQ(4) BUILD_IRQ(5) BUILD_IRQ(6) BUILD_IRQ(7)
BUILD_IRQ(8) BUILD_IRQ(9) BUILD_IRQ(10) BUILD_IRQ(11)
BUILD_IRQ(12) BUILD_IRQ(13) BUILD_IRQ(14) BUILD_IRQ(15)
#ifdef __SMP__

/*
 * The IO-APIC (present only in SMP boards) has 8 more hardware
 * interrupt pins, for all of which we define an IRQ vector.
 *
 * Raw PCI interrupts 0-3, basically these are the ones used
 * heavily:
 */
BUILD_IRQ(16) BUILD_IRQ(17) BUILD_IRQ(18) BUILD_IRQ(19)

/*
 * [FIXME: anyone with 2 separate PCI buses and 2 IO-APICs,
 *  please speak up and request experimental patches.
 *  --mingo ]
 */

/*
 * MIRQ (motherboard IRQ) interrupts 0-1:
 */
BUILD_IRQ(20) BUILD_IRQ(21)

/*
 * 'undefined' general purpose interrupt:
 */
BUILD_IRQ(22)

/*
 * optionally rerouted SMI interrupt:
 */
BUILD_IRQ(23)
/*
 * The following vectors are part of the Linux architecture; there
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs), via smp_message_pass():
 */
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)

/*
 * Every Pentium local APIC has two 'local interrupts', with a
 * soft-definable vector attached to both interrupts, one of
 * which is a timer interrupt, the other one is error counter
 * overflow. Linux uses the local APIC timer interrupt to get
 * a much simpler SMP time architecture:
 */
BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)

#endif
static void (*interrupt[NR_IRQS])(void) = {
        IRQ0_interrupt, IRQ1_interrupt, IRQ2_interrupt, IRQ3_interrupt,
        IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
        IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
        IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
#ifdef __SMP__
        ,IRQ16_interrupt, IRQ17_interrupt, IRQ18_interrupt, IRQ19_interrupt,
        IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt
#endif
};
/*
 * Initial irq handlers.
 */
static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. The IBM designers who came up with it should be shot.
 */
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
        outb(0,0xF0);
        if (ignore_irq13 || !boot_cpu_data.hard_math)
                return;
        math_error();
}

static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL };
/*
 * IRQ2 is the cascade interrupt to the second interrupt controller.
 */
static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};

static struct irqaction *irq_action[NR_IRQS] = {
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL
#ifdef __SMP__
        ,NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL
#endif
};
int get_irq_list(char *buf)
{
        int i, j;
        struct irqaction * action;
        char *p = buf;

        p += sprintf(p, "           ");
        for (j=0; j<smp_num_cpus; j++)
                p += sprintf(p, "CPU%d       ",j);
        *p++ = '\n';

        for (i = 0 ; i < NR_IRQS ; i++) {
                action = irq_action[i];
                if (!action)
                        continue;
                p += sprintf(p, "%3d: ",i);
#ifndef __SMP__
                p += sprintf(p, "%10u ", kstat_irqs(i));
#else
                for (j=0; j<smp_num_cpus; j++)
                        p += sprintf(p, "%10u ",
                                kstat.irqs[cpu_logical_map(j)][i]);
#endif
                if (IO_APIC_IRQ(i))
                        p += sprintf(p, " IO-APIC ");
                else
                        p += sprintf(p, "  XT PIC ");
                p += sprintf(p, " %s", action->name);

                for (action=action->next; action; action = action->next) {
                        p += sprintf(p, ", %s", action->name);
                }
                *p++ = '\n';
        }
        p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
#ifdef __SMP__
        p += sprintf(p, "IPI: %10lu\n", ipi_count);
#endif
        return p - buf;
}
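/*
 * Illustrative /proc/interrupts-style output of the above (column
 * widths approximate, counts made up):
 *
 *                 CPU0       CPU1
 *        0:     123456     123001    IO-APIC  timer
 *       13:          1          0     XT PIC  fpu
 *      NMI:          0
 */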
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;
370 * "global_cli()" is a special case, in that it can hold the
371 * interrupts disabled for a longish time, and also because
372 * we may be doing TLB invalidates when holding the global
373 * IRQ lock for historical reasons. Thus we may need to check
374 * SMP invalidate events specially by hand here (but not in
375 * any normal spinlocks)
377 static inline void check_smp_invalidate(int cpu)
379 if (test_bit(cpu, &smp_invalidate_needed)) {
380 clear_bit(cpu, &smp_invalidate_needed);
381 local_flush_tlb();
static void show(char * str)
{
        int i;
        unsigned long *stack;
        int cpu = smp_processor_id();

        printk("\n%s, CPU %d:\n", str, cpu);
        printk("irq:  %d [%d %d]\n",
                atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
        printk("bh:   %d [%d %d]\n",
                atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
        stack = (unsigned long *) &str;
        for (i = 40; i ; i--) {
                unsigned long x = *++stack;
                if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
                        printk("<[%08lx]> ", x);
                }
        }
}
#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
        int count = MAXCOUNT;
        do {
                if (!--count) {
                        show("wait_on_bh");
                        count = ~0;
                }
                /* nothing .. wait for the other bh's to go away */
        } while (atomic_read(&global_bh_count) != 0);
}
static inline void wait_on_irq(int cpu)
{
        int count = MAXCOUNT;

        while (atomic_read(&global_irq_count)) {
                clear_bit(0,&global_irq_lock);

                for (;;) {
                        if (!--count) {
                                show("wait_on_irq");
                                count = ~0;
                        }
                        __sti();
                        SYNC_OTHER_CORES(cpu);
                        __cli();
                        check_smp_invalidate(cpu);
                        if (atomic_read(&global_irq_count))
                                continue;
                        if (global_irq_lock)
                                continue;
                        if (!test_and_set_bit(0,&global_irq_lock))
                                break;
                }
        }
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
        if (atomic_read(&global_bh_count)) {
                int cpu = smp_processor_id();
                if (!local_irq_count[cpu] && !local_bh_count[cpu]) {
                        wait_on_bh();
                }
        }
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
        /* Stupid approach */
        cli();
        sti();
}
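/*
 * The cli()/sti() pair works because on SMP cli() maps to
 * __global_cli(), which spins in wait_on_irq() until
 * global_irq_count drops to zero, i.e. until no other CPU is
 * still executing an interrupt handler.
 */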
static inline void get_irqlock(int cpu)
{
        if (test_and_set_bit(0,&global_irq_lock)) {
                /* do we already hold the lock? */
                if ((unsigned char) cpu == global_irq_holder)
                        return;
                /* Uhhuh.. Somebody else got it. Wait.. */
                do {
                        do {
                                check_smp_invalidate(cpu);
                        } while (test_bit(0,&global_irq_lock));
                } while (test_and_set_bit(0,&global_irq_lock));
        }
        /*
         * We also need to make sure that nobody else is running
         * in an interrupt context.
         */
        wait_on_irq(cpu);

        /*
         * Ok, finally..
         */
        global_irq_holder = cpu;
}
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 */
void __global_cli(void)
{
        int cpu = smp_processor_id();

        __cli();
        if (!local_irq_count[cpu])
                get_irqlock(cpu);
}

void __global_sti(void)
{
        int cpu = smp_processor_id();

        if (!local_irq_count[cpu])
                release_irqlock(cpu);
        __sti();
}
unsigned long __global_save_flags(void)
{
        return global_irq_holder == (unsigned char) smp_processor_id();
}

void __global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_sti();
                break;
        case 1:
                __global_cli();
                break;
        default:
                printk("global_restore_flags: %08lx (%08lx)\n",
                        flags, (&flags)[-1]);
        }
}
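/*
 * So the saved flags value is a boolean: 1 means this CPU held the
 * global irq lock when the flags were saved. Typical pairing (sketch):
 *
 *      flags = __global_save_flags();
 *      __global_cli();
 *      ... critical region ...
 *      __global_restore_flags(flags);
 */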
#endif
static int handle_IRQ_event(int irq, struct pt_regs * regs)
{
        struct irqaction * action;
        int status;

        status = 0;
        action = *(irq + irq_action);

        if (action) {
                status |= 1;

                if (!(action->flags & SA_INTERRUPT))
                        __sti();

                do {
                        status |= action->flags;
                        action->handler(irq, action->dev_id, regs);
                        action = action->next;
                } while (action);
                if (status & SA_SAMPLE_RANDOM)
                        add_interrupt_randomness(irq);
                __cli();
        }

        return status;
}
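/*
 * The return value is nonzero iff at least one handler was installed
 * and run; do_8259A_IRQ() below uses this to decide whether to unmask
 * the irq again after acking it.
 */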
/*
 * disable_irq() waits for all irq contexts to finish executing.
 * disable/enable_irq() calls also nest, via the disabled_irq[]
 * counter.
 */
void disable_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        disabled_irq[irq]++;
        mask_irq(irq);
        spin_unlock_irqrestore(&irq_controller_lock, flags);

        synchronize_irq();
}

void enable_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        disabled_irq[irq]--;
        unmask_irq(irq);
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
/*
 * Careful! The 8259A is a fragile beast; it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static inline void mask_and_ack_8259A(int irq_nr)
{
        spin_lock(&irq_controller_lock);
        cached_irq_mask |= 1 << irq_nr;
        if (irq_nr & 8) {
                inb(0xA1);              /* DUMMY */
                outb(cached_A1,0xA1);
                outb(0x62,0x20);        /* Specific EOI to cascade */
                outb(0x20,0xA0);
        } else {
                inb(0x21);              /* DUMMY */
                outb(cached_21,0x21);
                outb(0x20,0x20);
        }
        spin_unlock(&irq_controller_lock);
}
static void do_8259A_IRQ(int irq, int cpu, struct pt_regs * regs)
{
        mask_and_ack_8259A(irq);

        irq_enter(cpu, irq);

        if (handle_IRQ_event(irq, regs)) {
                spin_lock(&irq_controller_lock);
                unmask_8259A(irq);
                spin_unlock(&irq_controller_lock);
        }

        irq_exit(cpu, irq);
}
/*
 * FIXME! This is completely broken.
 */
static void do_ioapic_IRQ(int irq, int cpu, struct pt_regs * regs)
{
        int should_handle_irq;

        spin_lock(&irq_controller_lock);
        should_handle_irq = 0;
        if (!irq_events[irq]++ && !disabled_irq[irq]) {
                should_handle_irq = 1;
                irq_owner[irq] = cpu;
                hardirq_enter(cpu);
        }

        ack_APIC_irq();

        spin_unlock(&irq_controller_lock);

        if (should_handle_irq) {
again:
                if (!handle_IRQ_event(irq, regs))
                        disabled_irq[irq] = 1;
        }

        spin_lock(&irq_controller_lock);
        release_irqlock(cpu);

        if ((--irq_events[irq]) && (!disabled_irq[irq]) && should_handle_irq) {
                spin_unlock(&irq_controller_lock);
                goto again;
        }

        irq_owner[irq] = NO_PROC_ID;
        hardirq_exit(cpu);
        spin_unlock(&irq_controller_lock);

        enable_IO_APIC_irq(irq);
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * The biggest change on SMP is that we no longer mask
 * interrupts in hardware; please believe me, this is unavoidable,
 * the hardware is largely message-oriented. I tried to force our
 * state-driven irq handling scheme onto the IO-APIC, but to no avail.
 *
 * So we soft-disable interrupts via 'event counters': the first 'incl'
 * will do the IRQ handling. This also has the nice side effect of increased
 * overlapping ... I saw no driver problem so far.
 */
asmlinkage void do_IRQ(struct pt_regs regs)
{
        void (*do_lowlevel_IRQ)(int, int, struct pt_regs *);
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * A 0 return value means that this irq is already being
         * handled by some other CPU (or is disabled).
         */
        int irq = regs.orig_eax & 0xff;
        int cpu = smp_processor_id();

        kstat.irqs[cpu][irq]++;

        do_lowlevel_IRQ = do_8259A_IRQ;
        if (IO_APIC_IRQ(irq))
                do_lowlevel_IRQ = do_ioapic_IRQ;

        do_lowlevel_IRQ(irq, cpu, &regs);

        /*
         * This should be conditional: we should really get
         * a return code from the irq handler to tell us
         * whether the handler wants us to do software bottom
         * half handling or not..
         */
        if (1) {
                if (bh_active & bh_mask)
                        do_bottom_half();
        }
}
int setup_x86_irq(int irq, struct irqaction * new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;

        p = irq_action + irq;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ))
                        return -EBUSY;

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        if (new->flags & SA_SAMPLE_RANDOM)
                rand_initialize_irq(irq);

        save_flags(flags);
        cli();
        *p = new;

        if (!shared) {
                spin_lock(&irq_controller_lock);
                if (IO_APIC_IRQ(irq)) {
                        /*
                         * First disable it in the 8259A:
                         */
                        cached_irq_mask |= 1 << irq;
                        if (irq < 16)
                                set_8259A_irq_mask(irq);
                        setup_IO_APIC_irq(irq);
                }
                unmask_irq(irq);
                spin_unlock(&irq_controller_lock);
        }
        restore_flags(flags);
        return 0;
}
int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_x86_irq(irq, action);

        if (retval)
                kfree(action);
        return retval;
}
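/*
 * Typical driver usage (a sketch; the mydev_* names are made up):
 *
 *      if (request_irq(dev->irq, mydev_interrupt, SA_SHIRQ,
 *                      "mydev", dev))
 *              return -EBUSY;
 *      ...
 *      free_irq(dev->irq, dev);        -- pass the same dev_id
 */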
void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction * action, **p;
        unsigned long flags;

        if (irq >= NR_IRQS) {
                printk("Trying to free IRQ%d\n",irq);
                return;
        }
        for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
                if (action->dev_id != dev_id)
                        continue;

                /* Found it - now free it */
                save_flags(flags);
                cli();
                *p = action->next;
                restore_flags(flags);
                kfree(action);
                return;
        }
        printk("Trying to free already-free IRQ%d\n",irq);
}
/*
 * probing is always single threaded [FIXME: is this true?]
 */
static unsigned int probe_irqs[NR_CPUS][NR_IRQS];

unsigned long probe_irq_on (void)
{
        unsigned int i, j, irqs = 0;
        unsigned long delay;

        /*
         * save current irq counts
         */
        memcpy(probe_irqs,kstat.irqs,NR_CPUS*NR_IRQS*sizeof(int));

        /*
         * first, enable any unassigned irqs
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                if (!irq_action[i]) {
                        spin_lock(&irq_controller_lock);
                        unmask_irq(i);
                        irqs |= (1 << i);
                        spin_unlock(&irq_controller_lock);
                }
        }

        /*
         * wait for spurious interrupts to increase counters
         */
        for (delay = jiffies + HZ/10; delay > jiffies; )
                /* about 100ms delay */ synchronize_irq();

        /*
         * now filter out any obviously spurious interrupts
         */
        for (i=0; i<NR_IRQS; i++)
                for (j=0; j<NR_CPUS; j++)
                        if (kstat.irqs[j][i] != probe_irqs[j][i])
                                irqs &= ~(1 << i);

        return irqs;
}
int probe_irq_off (unsigned long irqs)
{
        int i, j, irq_found = -1;

        for (i=0; i<NR_IRQS; i++) {
                int sum = 0;
                for (j=0; j<NR_CPUS; j++) {
                        sum += kstat.irqs[j][i];
                        sum -= probe_irqs[j][i];
                }
                if (sum && (irqs & (1 << i))) {
                        if (irq_found != -1) {
                                irq_found = -irq_found;
                                goto out;
                        } else
                                irq_found = i;
                }
        }
        if (irq_found == -1)
                irq_found = 0;
out:
        return irq_found;
}
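/*
 * Typical autoprobe sequence in a driver (a sketch; the mydev_*
 * helper is made up):
 *
 *      unsigned long mask = probe_irq_on();
 *      mydev_trigger_interrupt();      -- make the device raise its irq
 *      irq = probe_irq_off(mask);      -- >0: the irq, 0: none seen,
 *                                         <0: multiple candidates
 */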
void init_IO_APIC_traps(void)
{
        int i;
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        for (i = 0; i < NR_IRQS ; i++)
                if (IO_APIC_GATE_OFFSET+(i<<3) <= 0xfe)  /* HACK */ {
                        if (IO_APIC_IRQ(i)) {
                                /*
                                 * First disable it in the 8259A:
                                 */
                                cached_irq_mask |= 1 << i;
                                if (i < 16)
                                        set_8259A_irq_mask(i);
                                setup_IO_APIC_irq(i);
                        }
                }
}
__initfunc(void init_IRQ(void))
{
        int i;

        /* set the clock to 100 Hz */
        outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff , 0x40);    /* LSB */
        outb(LATCH >> 8 , 0x40);        /* MSB */

        printk("INIT IRQ\n");
        for (i=0; i<NR_IRQS; i++) {
                irq_events[i] = 0;
#ifdef __SMP__
                irq_owner[i] = NO_PROC_ID;
#endif
                disabled_irq[i] = 0;
        }
        /*
         * 16 old-style INTA-cycle interrupt gates:
         */
        for (i = 0; i < 16; i++)
                set_intr_gate(0x20+i,interrupt[i]);

#ifdef __SMP__

        for (i = 0; i < NR_IRQS ; i++)
                if (IO_APIC_GATE_OFFSET+(i<<3) <= 0xfe)  /* hack -- mingo */
                        set_intr_gate(IO_APIC_GATE_OFFSET+(i<<3),interrupt[i]);

        /*
         * The reschedule interrupt is slowly changing its functionality:
         * so far it has been a kind of broadcast timer interrupt, but
         * in the future it should become a CPU-to-CPU rescheduling IPI,
         * driven by schedule().
         *
         * [ It has to be here .. it doesn't work if you put
         *   it down the bottom - assembler explodes 8) ]
         */

        /* IPI for rescheduling */
        set_intr_gate(0x30, reschedule_interrupt);

        /* IPI for invalidation */
        set_intr_gate(0x31, invalidate_interrupt);

        /* IPI for CPU halt */
        set_intr_gate(0x40, stop_cpu_interrupt);

        /* self generated IPI for local APIC timer */
        set_intr_gate(0x41, apic_timer_interrupt);

#endif
        request_region(0x20,0x20,"pic1");
        request_region(0xa0,0x20,"pic2");
        setup_x86_irq(2, &irq2);
        setup_x86_irq(13, &irq13);
}