/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/tasks.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>

#include "irq.h"
unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];

atomic_t nmi_counter;
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
/*
 * Micro-access to controllers is serialized over the whole
 * system. We never hold this lock when we call the actual
 * IRQ handler.
 */
spinlock_t irq_controller_lock;
/*
 * Dummy controller type for unused interrupts
 */
static void do_none(unsigned int irq, struct pt_regs * regs)
{
	/*
	 * we are careful. While for ISA irqs it's common to happen
	 * outside of any driver (think autodetection), this is not
	 * at all nice for PCI interrupts. So we are stricter and
	 * print a warning when such spurious interrupts happen.
	 * Spurious interrupts can confuse other drivers if the PCI
	 * IRQ line is shared.
	 *
	 * Such spurious interrupts are either driver bugs, or
	 * sometimes hw (chipset) bugs.
	 */
	printk("unexpected IRQ vector %d on CPU#%d!\n", irq, smp_processor_id());

#ifdef __SMP__
	/*
	 * [currently unexpected vectors happen only on SMP and APIC.
	 * if we want to have non-APIC and non-8259A controllers
	 * in the future with unexpected vectors, this ack should
	 * probably be made controller-specific.]
	 */
	ack_APIC_irq();
#endif
}
static void enable_none(unsigned int irq) { }
static void disable_none(unsigned int irq) { }

/* startup is the same as "enable", shutdown is same as "disable" */
#define startup_none	enable_none
#define shutdown_none	disable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	do_none,
	enable_none,
	disable_none
};
/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 */
static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs);
static void enable_8259A_irq(unsigned int irq);
void disable_8259A_irq(unsigned int irq);

/* startup is the same as "enable", shutdown is same as "disable" */
#define startup_8259A_irq	enable_8259A_irq
#define shutdown_8259A_irq	disable_8259A_irq

static struct hw_interrupt_type i8259A_irq_type = {
	"XT-PIC",
	startup_8259A_irq,
	shutdown_8259A_irq,
	do_8259A_IRQ,
	enable_8259A_irq,
	disable_8259A_irq
};
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
static unsigned int cached_irq_mask = 0xffff;

#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21	(__byte(0,cached_irq_mask))
#define cached_A1	(__byte(1,cached_irq_mask))
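
/*
 * (On little-endian x86 the low byte of cached_irq_mask is the master
 * 8259A's mask, written to port 0x21, and the high byte is the slave's,
 * written to port 0xA1; __byte() simply aliases those bytes in place.)
 */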
/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not connected to any IO-APIC pin, it's
 * fed to the CPU IRQ line directly.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs = 0;
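
/*
 * (Membership is manipulated with plain bit operations, e.g.
 * io_apic_irqs &= ~(1 << irq) in make_8259A_irq() below.)
 */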
/*
 * These have to be protected by the irq controller spinlock
 * before being called.
 */
void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	cached_irq_mask |= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}
static void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	cached_irq_mask &= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}
int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;

	if (irq < 8)
		return (inb(0x20) & mask);
	return (inb(0xA0) & (mask >> 8));
}
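
/*
 * make_8259A_irq() reverts an IRQ from IO-APIC 'mixed mode' back to the
 * 8259A: the line is masked, removed from io_apic_irqs, rewired to the
 * i8259A controller type, and unmasked again.
 */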
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	irq_desc[irq].handler = &i8259A_irq_type;
	enable_irq(irq);
}
/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static inline void mask_and_ack_8259A(unsigned int irq)
{
	cached_irq_mask |= 1 << irq;
	if (irq & 8) {
		inb(0xA1);	/* DUMMY */
		outb(cached_A1,0xA1);
		outb(0x62,0x20);	/* Specific EOI to cascade */
		outb(0x20,0xA0);
	} else {
		inb(0x21);	/* DUMMY */
		outb(cached_21,0x21);
		outb(0x20,0x20);
	}
}
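
/*
 * do_8259A_IRQ() below works in three phases: ack the interrupt and mark
 * it IRQ_INPROGRESS under irq_controller_lock, run the handler chain with
 * the lock dropped, then re-take the lock to clear IRQ_INPROGRESS and
 * unmask the line if it wasn't disabled in the meantime.
 */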
static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	irq_desc_t *desc = irq_desc + irq;

	spin_lock(&irq_controller_lock);
	{
		unsigned int status;
		mask_and_ack_8259A(irq);
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		action = NULL;
		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
			action = desc->action;
			status |= IRQ_INPROGRESS;
		}
		desc->status = status;
	}
	spin_unlock(&irq_controller_lock);

	/* Exit early if we had no action or it was disabled */
	if (!action)
		return;

	handle_IRQ_event(irq, regs, action);

	spin_lock(&irq_controller_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
		desc->status = status;
		if (!(status & IRQ_DISABLED))
			enable_8259A_irq(irq);
	}
	spin_unlock(&irq_controller_lock);
}
/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that save
 * register context and call do_IRQ(). do_IRQ() then does all the
 * operations that are needed to keep the AT (or SMP IOAPIC)
 * interrupt-controller happy.
 */
BUILD_COMMON_IRQ()

#define BI(x,y) \
	BUILD_IRQ(##x##y)

#define BUILD_16_IRQS(x) \
	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
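
/*
 * (Sketch of the expansion: BI(0x0,1) pastes into BUILD_IRQ(0x01), which
 * emits the IRQ0x01_interrupt stub that the interrupt[] table below refers
 * to. The leading '##' in BI() relies on old-gcc token-pasting behaviour
 * rather than strict ISO C.)
 */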
/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x20-0x30)
 */
BUILD_16_IRQS(0x0)

#ifdef CONFIG_X86_IO_APIC
/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */
BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#endif

#undef BUILD_16_IRQS
#undef BI
#ifdef __SMP__
/*
 * The following vectors are part of the Linux architecture, there
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
BUILD_SMP_INTERRUPT(call_function_interrupt)
BUILD_SMP_INTERRUPT(spurious_interrupt)

/*
 * every pentium local APIC has two 'local interrupts', with a
 * soft-definable vector attached to both interrupts, one of
 * which is a timer interrupt, the other one is error counter
 * overflow. Linux uses the local APIC timer interrupt to get
 * a much simpler SMP time architecture:
 */
BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)

#endif
#define IRQ(x,y) \
	IRQ##x##y##_interrupt

#define IRQLIST_16(x) \
	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)

static void (*interrupt[NR_IRQS])(void) = {
	IRQLIST_16(0x0),

#ifdef CONFIG_X86_IO_APIC
	IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
	IRQLIST_16(0xc), IRQLIST_16(0xd)
#endif
};

#undef IRQ
#undef IRQLIST_16
/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
#ifndef CONFIG_VISWS
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, but it also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
	outb(0,0xF0);
	if (ignore_irq13 || !boot_cpu_data.hard_math)
		return;
	math_error();
}

static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL };

/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
#endif
/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	p += sprintf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d       ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j=0; j<smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
#ifdef __SMP__
	p += sprintf(p, "ERR: %10lu\n", ipi_count);
#endif
	return p - buf;
}
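
/*
 * Illustrative /proc/interrupts layout as assembled above (a sketch;
 * the counts are made up and the spacing approximate), UP case:
 *
 *	            CPU0
 *	  0:     123456         XT-PIC  timer
 *	  1:       4567         XT-PIC  keyboard
 *	NMI:          0
 */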
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;
454 * "global_cli()" is a special case, in that it can hold the
455 * interrupts disabled for a longish time, and also because
456 * we may be doing TLB invalidates when holding the global
457 * IRQ lock for historical reasons. Thus we may need to check
458 * SMP invalidate events specially by hand here (but not in
459 * any normal spinlocks)
461 static inline void check_smp_invalidate(int cpu)
463 if (test_bit(cpu, &smp_invalidate_needed)) {
464 clear_bit(cpu, &smp_invalidate_needed);
465 local_flush_tlb();
static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();
	extern char *get_options(char *str, int *ints);

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
		atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
	printk("bh:   %d [%d %d]\n",
		atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
	stack = (unsigned long *) &stack;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques,
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			check_smp_invalidate(cpu);
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				check_smp_invalidate(cpu);
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count[cpu])
			get_irqlock(cpu);
	}
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
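
/*
 * Example (a sketch, not code in this file): with the SMP mappings of
 * save_flags()/cli()/restore_flags() this encoding lets the usual driver
 * sequence nest correctly:
 *
 *	unsigned long flags;
 *	save_flags(flags);	-- records one of the 0..3 values above
 *	cli();			-- global or local, as appropriate
 *	... critical section ...
 *	restore_flags(flags);	-- dispatched on 0..3 below
 */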
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}

#endif
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}
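
/*
 * (Note: handlers registered without SA_INTERRUPT run with interrupts
 * re-enabled, via the __sti() above; SA_INTERRUPT marks "fast" handlers
 * that run with interrupts disabled for their whole duration.)
 */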
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!irq_desc[irq].depth++) {
		irq_desc[irq].status |= IRQ_DISABLED;
		irq_desc[irq].handler->disable(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU..
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count[smp_processor_id()]) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}
void enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	switch (irq_desc[irq].depth) {
	case 1:
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->enable(irq);
		/* fall through */
	default:
		irq_desc[irq].depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
			__builtin_return_address(0));
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
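
/*
 * (Note that disable/enable nest: irq_desc[irq].depth counts outstanding
 * disables, so two disable_irq() calls need two matching enable_irq()
 * calls before the line is unmasked again.)
 */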
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage void do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff;	/* subtle, see irq.h */
	int cpu = smp_processor_id();

	kstat.irqs[cpu][irq]++;
	irq_desc[irq].handler->handle(irq, &regs);

	/*
	 * This should be conditional: we should really get
	 * a return code from the irq handler to tell us
	 * whether the handler wants us to do software bottom
	 * half handling or not..
	 */
	if (1) {
		if (bh_active & bh_mask)
			do_bottom_half();
	}
}
int setup_x86_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&irq_controller_lock,flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].depth = 0;
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->startup(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock,flags);
	return 0;
}
int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_x86_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}
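
/*
 * Illustrative request_irq() usage (a sketch, not part of this file;
 * my_handler, my_dev and IRQ 9 are made-up names):
 *
 *	static void my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		... acknowledge the device, do minimal work, mark a bh ...
 *	}
 *
 *	if (request_irq(9, my_handler, SA_SHIRQ, "mydev", dev))
 *		... the line is busy (-EBUSY) or the arguments invalid ...
 *	...
 *	free_irq(9, dev);	-- dev_id selects which shared handler goes
 */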
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_controller_lock,flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		if (!irq_desc[irq].action) {
			irq_desc[irq].status |= IRQ_DISABLED;
			irq_desc[irq].handler->shutdown(irq);
		}
		goto out;
	}
	printk("Trying to free free IRQ%d\n",irq);
out:
	spin_unlock_irqrestore(&irq_controller_lock,flags);
}
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	unsigned long delay;

	/*
	 * first, enable any unassigned irqs
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS-1; i > 0; i--) {
		if (!irq_desc[i].action) {
			irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
			irq_desc[i].handler->startup(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		/* It triggered already - consider it spurious. */
		if (!(status & IRQ_WAITING)) {
			irq_desc[i].status = status & ~IRQ_AUTODETECT;
			irq_desc[i].handler->shutdown(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return 0x12345678;
}
int probe_irq_off(unsigned long unused)
{
	int i, irq_found, nr_irqs;

	if (unused != 0x12345678)
		printk("Bad IRQ probe from %lx\n", (&unused)[-1]);

	nr_irqs = 0;
	irq_found = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (!(status & IRQ_WAITING)) {
			if (!nr_irqs)
				irq_found = i;
			nr_irqs++;
		}
		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
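
/*
 * Typical autoprobe sequence (a sketch; the device-programming step is
 * hypothetical):
 *
 *	unsigned long cookie = probe_irq_on();
 *	... make the device raise its interrupt ...
 *	irq = probe_irq_off(cookie);
 *
 * irq > 0: a unique IRQ triggered; 0: none did; < 0: minus the first of
 * several candidates.
 */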
void init_ISA_irqs (void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = 0;
		irq_desc[i].depth = 0;

		if (i < 16) {
			/*
			 * 16 old-style INTA-cycle interrupts:
			 */
			irq_desc[i].handler = &i8259A_irq_type;
		} else {
			/*
			 * 'high' PCI IRQs filled in on demand
			 */
			irq_desc[i].handler = &no_irq_type;
		}
	}
}
__initfunc(void init_IRQ(void))
{
	int i;

#ifndef CONFIG_X86_VISWS_APIC
	init_ISA_irqs();
#else
	init_VISWS_APIC_irqs();
#endif
	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = 0; i < NR_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (vector != SYSCALL_VECTOR)
			set_intr_gate(vector, interrupt[i]);
	}
#ifdef __SMP__
	/*
	 * IRQ0 must be given a fixed assignment and initialized
	 * before init_IRQ_SMP.
	 */
	set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for CPU halt */
	set_intr_gate(STOP_CPU_VECTOR, stop_cpu_interrupt);

	/* self generated IPI for local APIC timer */
	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI vector for APIC spurious interrupts */
	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
#endif
	/*
	 * Set the clock to 100 Hz, we already have a valid
	 * vector now:
	 */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
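	/*
	 * (0x34 selects PIT channel 0, LSB-then-MSB access, mode 2 rate
	 * generator, binary counting; LATCH, from <linux/timex.h>, is the
	 * 1193180 Hz input clock divided by HZ.)
	 */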

#ifndef CONFIG_VISWS
	setup_x86_irq(2, &irq2);
	setup_x86_irq(13, &irq13);
#endif
}
#ifdef CONFIG_X86_IO_APIC
__initfunc(void init_IRQ_SMP(void))
{
	int i;
	for (i = 0; i < NR_IRQS ; i++)
		if (IO_APIC_VECTOR(i) > 0)
			set_intr_gate(IO_APIC_VECTOR(i), interrupt[i]);
}
#endif