/* mostly architecture independent
   the beautiful visws architecture code needs to be updated too.
   and, finally, the BUILD_IRQ and SMP_BUILD macros in irq.h need to be fixed.
 */

/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/delay.h>

#include <linux/irq.h>

unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Micro-access to controllers is serialized over the whole
 * system. We never hold this lock when we call the actual
 * IRQ handler.
 */
spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "CPU%d ", j);
	*p++ = '\n';

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, " %s", action->name);

		for (action = action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
#ifdef CONFIG_SMP
	p += sprintf(p, "IPI: %10lu\n", ipi_count);
#endif
	return p - buf;
}

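/*
 * For illustration only (values made up, controller name assumed),
 * the text assembled above looks roughly like this, per the sprintf
 * formats:
 *
 *	CPU0       CPU1
 *	  0:     215274     215139          XT-PIC  timer
 *	  1:       5618          0          XT-PIC  keyboard
 *	NMI:          0
 */
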
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;
spinlock_t i386_bh_lock = SPIN_LOCK_UNLOCKED;

/*
 * "global_cli()" is a special case, in that it can hold the
 * interrupts disabled for a longish time, and also because
 * we may be doing TLB invalidates when holding the global
 * IRQ lock for historical reasons. Thus we may need to check
 * SMP invalidate events specially by hand here (but not in
 * any normal spinlocks)
 */
static inline void check_smp_invalidate(int cpu)
{
	if (test_bit(cpu, &smp_invalidate_needed)) {
		struct mm_struct *mm = current->mm;
		clear_bit(cpu, &smp_invalidate_needed);
		if (mm)
			atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
		local_flush_tlb();
	}
}

static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq: %d [%d %d]\n",
		atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
	printk("bh: %d [%d %d]\n",
		atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
	stack = (unsigned long *) &stack;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &get_option && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
	printk("\n");
}

#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques.
 * It would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things were guaranteed by design.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			check_smp_invalidate(cpu);
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}

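/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): once the device has been told to stop generating interrupts,
 * synchronize_irq() waits out any handler still running on another CPU.
 */
#if 0
static void mydev_stop(struct mydev *dev)	/* hypothetical type */
{
	mydev_mask_hw_irqs(dev);	/* device-specific, assumed */
	synchronize_irq();		/* wait for in-flight handlers */
	free_irq(dev->irq, dev);
}
#endif
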
static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				check_smp_invalidate(cpu);
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	global_irq_holder = cpu;
}

#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count[cpu])
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}

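/*
 * Sketch of the pattern the comment above is about (assuming the usual
 * 2.x SMP macro mapping of save_flags/cli/restore_flags onto the
 * __global_* functions in this file):
 */
#if 0
	unsigned long flags;

	save_flags(flags);	/* __global_save_flags() on SMP */
	cli();			/* __global_cli(): global unless in irq */
	/* ... touch data shared with interrupt handlers ... */
	restore_flags(flags);	/* __global_restore_flags() */
#endif
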
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

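/*
 * Worked example of the encoding above (illustrative): the CPU that
 * holds the global irq lock gets 0 back, so restoring that value
 * re-enters __global_cli(); an interrupt handler running with local
 * interrupts enabled gets 3, which restores to a plain __sti().
 */
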
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0: __global_cli(); break;
	case 1: __global_sti(); break;
	case 2: __cli(); break;
	case 3: __sti(); break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}

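/*
 * Shape of the handlers invoked by the loop above (sketch, hypothetical
 * names): unless the action was registered with SA_INTERRUPT, it runs
 * with interrupts enabled, courtesy of the __sti() above.
 */
#if 0
static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = (struct mydev *) dev_id;	/* assumed type */

	/* ack the device, service it, mark a bottom half if needed */
}
#endif
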
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!irq_desc[irq].depth++) {
		irq_desc[irq].status |= IRQ_DISABLED;
		irq_desc[irq].handler->disable(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU..
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count[smp_processor_id()]) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}

void enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	switch (irq_desc[irq].depth) {
	case 1: {
		unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
		irq_desc[irq].status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			irq_desc[irq].status = status | IRQ_REPLAY;
			hw_resend_irq(irq_desc[irq].handler, irq);
		}
		irq_desc[irq].handler->enable(irq);
		/* fall-through */
	}
	default:
		irq_desc[irq].depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
			__builtin_return_address(0));
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

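/*
 * The depth counter makes disable/enable nest; an unbalanced enable_irq()
 * hits the printk above. For example:
 *
 *	disable_irq(irq);	depth 0 -> 1, line masked
 *	disable_irq(irq);	depth 1 -> 2
 *	enable_irq(irq);	depth 2 -> 1, still masked
 *	enable_irq(irq);	depth 1 -> 0, line unmasked
 */
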
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff;	/* high bits used in ret_from_ code */
	int cpu = smp_processor_id();
	irq_desc_t *desc;
	struct irqaction * action;
	unsigned int status;

	kstat.irqs[cpu][irq]++;
	desc = irq_desc + irq;
	spin_lock(&irq_controller_lock);
	irq_desc[irq].handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;
	spin_unlock(&irq_controller_lock);

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		return 1;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		handle_IRQ_event(irq, &regs, action);
		spin_lock(&irq_controller_lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
		spin_unlock(&irq_controller_lock);
	}
	desc->status &= ~IRQ_INPROGRESS;
	if (!(desc->status & IRQ_DISABLED)) {
		irq_desc[irq].handler->end(irq);
	}
	spin_unlock(&irq_controller_lock);

	/*
	 * This should be conditional: we should really get
	 * a return code from the irq handler to tell us
	 * whether the handler wants us to do software bottom
	 * half handling or not..
	 */
	if (bh_active & bh_mask)
		do_bottom_half();
	return 1;
}

int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_controller_lock,flags);
	p = &irq_desc[irq].action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!irq_desc[irq].action) {
				irq_desc[irq].status |= IRQ_DISABLED;
				irq_desc[irq].handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&irq_controller_lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			while (irq_desc[irq].status & IRQ_INPROGRESS)
				barrier();
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&irq_controller_lock,flags);
		return;
	}
}

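/*
 * Typical driver usage of the two entry points above (sketch, with the
 * hypothetical mydev names from earlier): dev_id must match between the
 * calls, and is what lets SA_SHIRQ sharers remove only their own action.
 */
#if 0
static int mydev_open(struct mydev *dev)
{
	if (request_irq(dev->irq, mydev_interrupt,
			SA_SHIRQ | SA_SAMPLE_RANDOM, "mydev", dev))
		return -EBUSY;
	return 0;
}

static void mydev_release(struct mydev *dev)
{
	free_irq(dev->irq, dev);
}
#endif
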
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	unsigned long delay;

	/*
	 * first, enable any unassigned irqs
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS-1; i > 0; i--) {
		if (!irq_desc[i].action) {
			irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (irq_desc[i].handler->startup(i))
				irq_desc[i].status |= IRQ_PENDING;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		/* It triggered already - consider it spurious. */
		if (!(status & IRQ_WAITING)) {
			irq_desc[i].status = status & ~IRQ_AUTODETECT;
			irq_desc[i].handler->shutdown(i);
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return 0x12345678;
}

int probe_irq_off(unsigned long unused)
{
	int i, irq_found, nr_irqs;

	if (unused != 0x12345678)
		printk("Bad IRQ probe from %lx\n", (&unused)[-1]);

	nr_irqs = 0;
	irq_found = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (!(status & IRQ_WAITING)) {
			if (!nr_irqs)
				irq_found = i;
			nr_irqs++;
		}
		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

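/*
 * The classic autoprobe sequence built on the two functions above
 * (sketch; mydev_force_interrupt() is a hypothetical device poke):
 */
#if 0
	unsigned long cookie;
	int irq;

	cookie = probe_irq_on();	/* enable unassigned irqs */
	mydev_force_interrupt();	/* make the device raise its irq */
	udelay(100);			/* let the interrupt arrive */
	irq = probe_irq_off(cookie);	/* >0: found, 0: none, <0: several */
#endif
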
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler. But is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&irq_controller_lock,flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].depth = 0;
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->startup(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock,flags);
	return 0;
}