/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/tasks.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Micro-access to controllers is serialized over the whole
 * system. We never hold this lock when we call the actual
 * IRQ handler.
 */
spinlock_t irq_controller_lock;
/*
 * Dummy controller type for unused interrupts
 */
static void do_none(unsigned int irq, struct pt_regs * regs)
{
	/*
	 * we are careful. While for ISA irqs it's common to happen
	 * outside of any driver (think autodetection), this is not
	 * at all nice for PCI interrupts. So we are stricter and
	 * print a warning when such spurious interrupts happen.
	 * Spurious interrupts can confuse other drivers if the PCI
	 * IRQ is shared.
	 *
	 * Such spurious interrupts are either driver bugs, or
	 * sometimes hw (chipset) bugs.
	 */
	printk("unexpected IRQ vector %d on CPU#%d!\n", irq, smp_processor_id());

#ifdef __SMP__
	/*
	 * [currently unexpected vectors happen only on SMP and APIC.
	 * if we want to have non-APIC and non-8259A controllers
	 * in the future with unexpected vectors, this ack should
	 * probably be made controller-specific.]
	 */
	ack_APIC_irq();
#endif
}
static void enable_none(unsigned int irq) { }
static void disable_none(unsigned int irq) { }

/* startup is the same as "enable", shutdown is same as "disable" */
#define startup_none	enable_none
#define shutdown_none	disable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	do_none,
	enable_none,
	disable_none
};
/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 */

static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs);
static void enable_8259A_irq(unsigned int irq);
void disable_8259A_irq(unsigned int irq);

/* startup is the same as "enable", shutdown is same as "disable" */
#define startup_8259A_irq	enable_8259A_irq
#define shutdown_8259A_irq	disable_8259A_irq

static struct hw_interrupt_type i8259A_irq_type = {
	"XT-PIC",
	startup_8259A_irq,
	shutdown_8259A_irq,
	do_8259A_IRQ,
	enable_8259A_irq,
	disable_8259A_irq
};
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers,
 */
static unsigned int cached_irq_mask = 0xffff;

#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21	(__byte(0,cached_irq_mask))
#define cached_A1	(__byte(1,cached_irq_mask))
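/*
 * Illustration (not part of the original file): on little-endian
 * x86, __byte(0,...) is the low byte of the 16-bit mask (the
 * master 8259A, programmed via port 0x21) and __byte(1,...) is
 * the high byte (the slave, port 0xA1). For example:
 *
 *	cached_irq_mask = 0xfffb;	// only IRQ2 (cascade) unmasked
 *	outb(cached_21, 0x21);		// writes 0xfb to the master PIC
 *	outb(cached_A1, 0xA1);		// writes 0xff to the slave PIC
 */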
/*
 * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
 * boards the timer interrupt is not connected to any IO-APIC pin, it's
 * fed to the CPU IRQ line directly.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs = 0;
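/*
 * Illustration (not part of the original file): code elsewhere
 * decides the routing by testing the per-IRQ bit, along the lines of
 *
 *	if (io_apic_irqs & (1UL << irq))
 *		// line is handled by the IO-APIC
 *	else
 *		// line is handled by the legacy 8259A
 */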
/*
 * These have to be protected by the irq controller spinlock
 * before being called.
 */
void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;

	cached_irq_mask |= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}

static void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);

	cached_irq_mask &= mask;
	if (irq & 8) {
		outb(cached_A1,0xA1);
	} else {
		outb(cached_21,0x21);
	}
}
int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;

	if (irq < 8)
		return (inb(0x20) & mask);
	return (inb(0xA0) & (mask >> 8));
}
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	irq_desc[irq].handler = &i8259A_irq_type;
	enable_irq(irq);
}
/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static inline void mask_and_ack_8259A(unsigned int irq)
{
	cached_irq_mask |= 1 << irq;
	if (irq & 8) {
		inb(0xA1);		/* DUMMY */
		outb(cached_A1,0xA1);
		outb(0x62,0x20);	/* Specific EOI to cascade */
		outb(0x20,0xA0);
	} else {
		inb(0x21);		/* DUMMY */
		outb(cached_21,0x21);
		outb(0x20,0x20);
	}
}
static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	irq_desc_t *desc = irq_desc + irq;

	spin_lock(&irq_controller_lock);
	{
		unsigned int status;
		mask_and_ack_8259A(irq);
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		action = NULL;
		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
			action = desc->action;
			status |= IRQ_INPROGRESS;
		}
		desc->status = status;
	}
	spin_unlock(&irq_controller_lock);

	/* Exit early if we had no action or it was disabled */
	if (!action)
		return;

	handle_IRQ_event(irq, regs, action);

	spin_lock(&irq_controller_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
		desc->status = status;
		if (!(status & IRQ_DISABLED))
			enable_8259A_irq(irq);
	}
	spin_unlock(&irq_controller_lock);
}
/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that save
 * register context and call do_IRQ(). do_IRQ() then does all the
 * operations that are needed to keep the AT (or SMP IOAPIC)
 * interrupt-controller happy.
 */

BUILD_COMMON_IRQ()

#define BI(x,y) \
	BUILD_IRQ(x##y)

#define BUILD_16_IRQS(x) \
	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
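/*
 * Illustration (not part of the original file): BI(0x0,3) expands
 * to BUILD_IRQ(0x03), which emits an assembly stub named
 * IRQ0x03_interrupt; one BUILD_16_IRQS(x) thus generates the
 * sixteen stubs for that block of vectors.
 */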
/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x20-0x30)
 */
BUILD_16_IRQS(0x0)

#ifdef CONFIG_X86_IO_APIC
/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */
BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#endif

#undef BUILD_16_IRQS
#undef BI
#ifdef __SMP__
/*
 * The following vectors are part of the Linux architecture, there
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
BUILD_SMP_INTERRUPT(mtrr_interrupt)
BUILD_SMP_INTERRUPT(spurious_interrupt)

/*
 * every pentium local APIC has two 'local interrupts', with a
 * soft-definable vector attached to both interrupts, one of
 * which is a timer interrupt, the other one is error counter
 * overflow. Linux uses the local APIC timer interrupt to get
 * a much simpler SMP time architecture:
 */
BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)
#endif
#define IRQ(x,y) \
	IRQ##x##y##_interrupt

#define IRQLIST_16(x) \
	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
static void (*interrupt[NR_IRQS])(void) = {
	IRQLIST_16(0x0),

#ifdef CONFIG_X86_IO_APIC
	IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
	IRQLIST_16(0xc), IRQLIST_16(0xd)
#endif
};

#undef IRQ
#undef IRQLIST_16
/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, but it also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
	outb(0,0xF0);
	if (ignore_irq13 || !boot_cpu_data.hard_math)
		return;
	math_error();
}

static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL };
/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL };
/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	p += sprintf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d       ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j=0; j<smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
#ifdef __SMP__
	p += sprintf(p, "ERR: %10lu\n", ipi_count);
#endif
	return p - buf;
}
#ifdef __SMP__

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;
454 * "global_cli()" is a special case, in that it can hold the
455 * interrupts disabled for a longish time, and also because
456 * we may be doing TLB invalidates when holding the global
457 * IRQ lock for historical reasons. Thus we may need to check
458 * SMP invalidate events specially by hand here (but not in
459 * any normal spinlocks)
461 static inline void check_smp_invalidate(int cpu
)
463 if (test_bit(cpu
, &smp_invalidate_needed
)) {
464 clear_bit(cpu
, &smp_invalidate_needed
);
static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();
	extern char *get_options(char *str, int *ints);

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
		atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
	printk("bh:   %d [%d %d]\n",
		atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
	stack = (unsigned long *) &stack;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques,
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			check_smp_invalidate(cpu);
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				check_smp_invalidate(cpu);
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
#define EFLAGS_IF_SHIFT 9
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count[cpu])
			get_irqlock(cpu);
	}
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}
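/*
 * Illustration (not part of the original file): the 0-3 encoding
 * above keeps the classic driver idiom working unchanged on SMP:
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);	// __global_save_flags() on SMP
 *	cli();			// may take the global irq lock
 *	... critical section ...
 *	restore_flags(flags);	// __global_restore_flags(flags)
 */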
#endif /* __SMP__ */

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!irq_desc[irq].depth++) {
		irq_desc[irq].status |= IRQ_DISABLED;
		irq_desc[irq].handler->disable(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other IRQ..
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count[smp_processor_id()]) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}
void enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	switch (irq_desc[irq].depth) {
	case 1:
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->enable(irq);
		/* fall through */
	default:
		irq_desc[irq].depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
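/*
 * Illustration (not part of the original file): disable/enable
 * nest via the depth counter, so the line is unmasked only when
 * the final enable balances the first disable:
 *
 *	disable_irq(n);		// depth 0 -> 1, line masked
 *	disable_irq(n);		// depth 1 -> 2
 *	enable_irq(n);		// depth 2 -> 1, still masked
 *	enable_irq(n);		// depth 1 -> 0, line unmasked
 */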
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage void do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff;	/* subtle, see irq.h */
	int cpu = smp_processor_id();

	kstat.irqs[cpu][irq]++;
	irq_desc[irq].handler->handle(irq, &regs);

	/*
	 * This should be conditional: we should really get
	 * a return code from the irq handler to tell us
	 * whether the handler wants us to do software bottom
	 * half handling or not..
	 */
	if (bh_active & bh_mask)
		do_bottom_half();
}
int setup_x86_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&irq_controller_lock,flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].depth = 0;
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->startup(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock,flags);
	return 0;
}
int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_x86_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_controller_lock,flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		if (!irq_desc[irq].action) {
			irq_desc[irq].status |= IRQ_DISABLED;
			irq_desc[irq].handler->shutdown(irq);
		}
		goto out;
	}
	printk("Trying to free free IRQ%d\n",irq);
out:
	spin_unlock_irqrestore(&irq_controller_lock,flags);
}
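/*
 * Illustration (not part of the original file): typical driver use
 * of the two calls above, with a made-up handler and dev_id:
 *
 *	if (request_irq(irq, my_handler, SA_SHIRQ, "mydev", dev))
 *		return -EBUSY;		// line in use and not shareable
 *	...
 *	free_irq(irq, dev);		// dev_id selects our action
 */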
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	unsigned long delay;

	/*
	 * first, enable any unassigned irqs
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS-1; i > 0; i--) {
		if (!irq_desc[i].action) {
			irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
			irq_desc[i].handler->startup(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		/* It triggered already - consider it spurious. */
		if (!(status & IRQ_WAITING)) {
			irq_desc[i].status = status & ~IRQ_AUTODETECT;
			irq_desc[i].handler->shutdown(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return 0x12345678;
}
int probe_irq_off(unsigned long unused)
{
	int i, irq_found, nr_irqs;

	if (unused != 0x12345678)
		printk("Bad IRQ probe from %lx\n", (&unused)[-1]);

	nr_irqs = 0;
	irq_found = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i=0; i<NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (!(status & IRQ_WAITING)) {
			if (!nr_irqs)
				irq_found = i;
			nr_irqs++;
		}
		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
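/*
 * Illustration (not part of the original file): a driver probe
 * brackets a forced device interrupt with these two calls:
 *
 *	unsigned long mask = probe_irq_on();
 *	trigger_board_interrupt();	// hypothetical driver-specific poke
 *	irq = probe_irq_off(mask);	// >0: found; 0: none; <0: multiple hits
 */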
void init_ISA_irqs (void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = 0;
		irq_desc[i].depth = 0;

		if (i < 16) {
			/*
			 * 16 old-style INTA-cycle interrupts:
			 */
			irq_desc[i].handler = &i8259A_irq_type;
		} else {
			/*
			 * 'high' PCI IRQs filled in on demand
			 */
			irq_desc[i].handler = &no_irq_type;
		}
	}
}
__initfunc(void init_IRQ(void))
{
	int i;

#ifndef CONFIG_X86_VISWS_APIC
	init_ISA_irqs();
#else
	init_VISWS_APIC_irqs();
#endif
	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = 0; i < NR_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (vector != SYSCALL_VECTOR)
			set_intr_gate(vector, interrupt[i]);
	}

#ifdef __SMP__
	/*
	 * IRQ0 must be given a fixed assignment and initialized
	 * before init_IRQ_SMP.
	 */
	set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for CPU halt */
	set_intr_gate(STOP_CPU_VECTOR, stop_cpu_interrupt);

	/* self generated IPI for local APIC timer */
	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for MTRR control */
	set_intr_gate(MTRR_CHANGE_VECTOR, mtrr_interrupt);

	/* IPI vector for APIC spurious interrupts */
	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
#endif

	request_region(0x20,0x20,"pic1");
	request_region(0xa0,0x20,"pic2");

	/*
	 * Set the clock to 100 Hz, we already have a valid
	 * vector now:
	 */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
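	/*
	 * Illustration (not part of the original file): LATCH is
	 * defined in <linux/timex.h> as roughly CLOCK_TICK_RATE/HZ;
	 * with the PIT's 1193180 Hz input clock and HZ == 100 that
	 * is ~11932, written LSB then MSB into channel 0 to produce
	 * the 100 Hz timer tick.
	 */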
#ifndef CONFIG_VISWS
	setup_x86_irq(2, &irq2);
	setup_x86_irq(13, &irq13);
#endif
}
#ifdef CONFIG_X86_IO_APIC
__initfunc(void init_IRQ_SMP(void))
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		if (IO_APIC_VECTOR(i) > 0)
			set_intr_gate(IO_APIC_VECTOR(i), interrupt[i]);
}
#endif