Import 2.3.6
[davej-history.git] / arch / arm / kernel / irq.c
blob332e8940d707c7c199e9f7a4797ea2e17bbba305
1 /*
2 * linux/arch/arm/kernel/irq.c
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-1998 Russell King.
7 * This file contains the code used by various IRQ handling routines:
8 * asking for different IRQ's should be done through these routines
9 * instead of just grabbing them. Thus setups with different IRQ numbers
10 * shouldn't result in any weird surprises, and installing new handlers
11 * should be easier.
15 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
16 * Naturally it's not a 1:1 relation, but there are similarities.
18 #include <linux/config.h> /* for CONFIG_DEBUG_ERRORS */
19 #include <linux/ptrace.h>
20 #include <linux/errno.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/signal.h>
23 #include <linux/sched.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/timex.h>
27 #include <linux/malloc.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/smp_lock.h>
31 #include <linux/init.h>
33 #include <asm/hardware.h>
34 #include <asm/io.h>
35 #include <asm/pgtable.h>
36 #include <asm/system.h>
#ifndef SMP
/* Uniprocessor build: hard-IRQ nesting is tracked with simple per-CPU counters. */
#define irq_enter(cpu, irq) (++local_irq_count[cpu])
#define irq_exit(cpu, irq) (--local_irq_count[cpu])
#else
#error SMP not supported
#endif

/*
 * Machine ports may provide cliIF() (presumably to disable FIQs as well as
 * IRQs while the controller is touched — TODO confirm per-machine); the
 * default is a no-op.
 */
#ifndef cliIF
#define cliIF()
#endif

/* Per-CPU softirq/hard-IRQ nesting counters used by irq_enter()/irq_exit(). */
unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
/* Serialises all access to irq_desc[] state and the controller hooks. */
spinlock_t irq_controller_lock;

/* FIQ handling lives elsewhere (Acorn); used under CONFIG_ACORN/CONFIG_ARCH_ACORN below. */
extern int get_fiq_list(char *);
extern void init_FIQ(void);
56 struct irqdesc {
57 unsigned int nomask : 1; /* IRQ does not mask in IRQ */
58 unsigned int enabled : 1; /* IRQ is currently enabled */
59 unsigned int triggered: 1; /* IRQ has occurred */
60 unsigned int probing : 1; /* IRQ in use for a probe */
61 unsigned int probe_ok : 1; /* IRQ can be used for probe */
62 unsigned int valid : 1; /* IRQ claimable */
63 unsigned int unused :26;
64 void (*mask_ack)(unsigned int irq); /* Mask and acknowledge IRQ */
65 void (*mask)(unsigned int irq); /* Mask IRQ */
66 void (*unmask)(unsigned int irq); /* Unmask IRQ */
67 struct irqaction *action;
68 unsigned int unused2[3];
71 static struct irqdesc irq_desc[NR_IRQS];
/*
 * Dummy mask/unmask handler.
 *
 * Installed into every irq_desc[] slot by init_IRQ() so the mask_ack/
 * mask/unmask hooks are always callable, even before the machine's
 * irq_init_irq() has supplied real controller routines.  Deliberately
 * does nothing.  (Body restored: the extraction had dropped it.)
 */
static void dummy_mask_unmask_irq(unsigned int irq)
{
}
80 void disable_irq(unsigned int irq)
82 unsigned long flags;
84 spin_lock_irqsave(&irq_controller_lock, flags);
85 cliIF();
86 irq_desc[irq].enabled = 0;
87 irq_desc[irq].mask(irq);
88 spin_unlock_irqrestore(&irq_controller_lock, flags);
91 void enable_irq(unsigned int irq)
93 unsigned long flags;
95 spin_lock_irqsave(&irq_controller_lock, flags);
96 cliIF();
97 irq_desc[irq].enabled = 1;
98 irq_desc[irq].probing = 0;
99 irq_desc[irq].triggered = 0;
100 irq_desc[irq].unmask(irq);
101 spin_unlock_irqrestore(&irq_controller_lock, flags);
104 int get_irq_list(char *buf)
106 int i;
107 struct irqaction * action;
108 char *p = buf;
110 for (i = 0 ; i < NR_IRQS ; i++) {
111 action = irq_desc[i].action;
112 if (!action)
113 continue;
114 p += sprintf(p, "%3d: %10u %s",
115 i, kstat_irqs(i), action->name);
116 for (action = action->next; action; action = action->next) {
117 p += sprintf(p, ", %s", action->name);
119 *p++ = '\n';
122 #ifdef CONFIG_ACORN
123 p += get_fiq_list(p);
124 #endif
125 return p - buf;
129 * do_IRQ handles all normal device IRQ's
131 asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
133 struct irqdesc * desc = irq_desc + irq;
134 struct irqaction * action;
135 int status, cpu;
137 spin_lock(&irq_controller_lock);
138 desc->mask_ack(irq);
139 spin_unlock(&irq_controller_lock);
141 cpu = smp_processor_id();
142 irq_enter(cpu, irq);
143 kstat.irqs[cpu][irq]++;
144 desc->triggered = 1;
146 /* Return with this interrupt masked if no action */
147 status = 0;
148 action = desc->action;
150 if (action) {
151 if (desc->nomask) {
152 spin_lock(&irq_controller_lock);
153 desc->unmask(irq);
154 spin_unlock(&irq_controller_lock);
157 if (!(action->flags & SA_INTERRUPT))
158 __sti();
160 do {
161 status |= action->flags;
162 action->handler(irq, action->dev_id, regs);
163 action = action->next;
164 } while (action);
166 if (status & SA_SAMPLE_RANDOM)
167 add_interrupt_randomness(irq);
168 __cli();
170 if (!desc->nomask && desc->enabled) {
171 spin_lock(&irq_controller_lock);
172 desc->unmask(irq);
173 spin_unlock(&irq_controller_lock);
177 irq_exit(cpu, irq);
180 * This should be conditional: we should really get
181 * a return code from the irq handler to tell us
182 * whether the handler wants us to do software bottom
183 * half handling or not..
185 * ** IMPORTANT NOTE: do_bottom_half() ENABLES IRQS!!! **
186 * ** WE MUST DISABLE THEM AGAIN, ELSE IDE DISKS GO **
187 * ** AWOL **
189 if (1) {
190 if (bh_active & bh_mask)
191 do_bottom_half();
192 __cli();
#if defined(CONFIG_ARCH_ACORN)
/*
 * Dispatch an expansion-card interrupt.  Unlike do_IRQ() there is no
 * mask/ack sequence here; the handlers are simply called in turn.  An
 * unclaimed ecard IRQ is masked off so it cannot scream at us.
 */
void do_ecard_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;
	struct irqaction *action;
	int cpu;

	cpu = smp_processor_id();
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;		/* remembered for IRQ autoprobing */

	action = desc->action;
	if (action == NULL) {
		/* nobody registered: mask the source */
		spin_lock(&irq_controller_lock);
		desc->mask(irq);
		spin_unlock(&irq_controller_lock);
		return;
	}

	while (action) {
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	}
}
#endif
224 int setup_arm_irq(int irq, struct irqaction * new)
226 int shared = 0;
227 struct irqaction *old, **p;
228 unsigned long flags;
230 if (new->flags & SA_SAMPLE_RANDOM)
231 rand_initialize_irq(irq);
233 spin_lock_irqsave(&irq_controller_lock, flags);
235 p = &irq_desc[irq].action;
236 if ((old = *p) != NULL) {
237 /* Can't share interrupts unless both agree to */
238 if (!(old->flags & new->flags & SA_SHIRQ)) {
239 spin_unlock_irqrestore(&irq_controller_lock, flags);
240 return -EBUSY;
243 /* add new interrupt at end of irq queue */
244 do {
245 p = &old->next;
246 old = *p;
247 } while (old);
248 shared = 1;
251 *p = new;
253 if (!shared) {
254 irq_desc[irq].nomask = (new->flags & SA_IRQNOMASK) ? 1 : 0;
255 irq_desc[irq].enabled = 1;
256 irq_desc[irq].probing = 0;
257 irq_desc[irq].unmask(irq);
260 spin_unlock_irqrestore(&irq_controller_lock, flags);
261 return 0;
265 * Using "struct sigaction" is slightly silly, but there
266 * are historical reasons and it works well, so..
268 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
269 unsigned long irq_flags, const char * devname, void *dev_id)
271 unsigned long retval;
272 struct irqaction *action;
274 if (!irq_desc[irq].valid)
275 return -EINVAL;
276 if (!handler)
277 return -EINVAL;
279 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
280 if (!action)
281 return -ENOMEM;
283 action->handler = handler;
284 action->flags = irq_flags;
285 action->mask = 0;
286 action->name = devname;
287 action->next = NULL;
288 action->dev_id = dev_id;
290 retval = setup_arm_irq(irq, action);
292 if (retval)
293 kfree(action);
294 return retval;
297 void free_irq(unsigned int irq, void *dev_id)
299 struct irqaction * action, **p;
300 unsigned long flags;
302 if (!irq_desc[irq].valid) {
303 printk(KERN_ERR "Trying to free IRQ%d\n",irq);
304 #ifdef CONFIG_DEBUG_ERRORS
305 __backtrace();
306 #endif
307 return;
309 for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
310 if (action->dev_id != dev_id)
311 continue;
313 /* Found it - now free it */
314 save_flags_cli (flags);
315 *p = action->next;
316 restore_flags (flags);
317 kfree(action);
318 return;
320 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
321 #ifdef CONFIG_DEBUG_ERRORS
322 __backtrace();
323 #endif
326 /* Start the interrupt probing. Unlike other architectures,
327 * we don't return a mask of interrupts from probe_irq_on,
328 * but return the number of interrupts enabled for the probe.
329 * The interrupts which have been enabled for probing is
330 * instead recorded in the irq_desc structure.
332 unsigned long probe_irq_on(void)
334 unsigned int i, irqs = 0;
335 unsigned long delay;
338 * first snaffle up any unassigned but
339 * probe-able interrupts
341 spin_lock_irq(&irq_controller_lock);
342 for (i = 0; i < NR_IRQS; i++) {
343 if (!irq_desc[i].valid ||
344 !irq_desc[i].probe_ok ||
345 irq_desc[i].action)
346 continue;
348 irq_desc[i].probing = 1;
349 irq_desc[i].enabled = 1;
350 irq_desc[i].triggered = 0;
351 irq_desc[i].unmask(i);
352 irqs += 1;
354 spin_unlock_irq(&irq_controller_lock);
357 * wait for spurious interrupts to mask themselves out again
359 for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
360 /* min 100ms delay */;
363 * now filter out any obviously spurious interrupts
365 spin_lock_irq(&irq_controller_lock);
366 for (i = 0; i < NR_IRQS; i++) {
367 if (irq_desc[i].probing && irq_desc[i].triggered) {
368 irq_desc[i].probing = 0;
369 irqs -= 1;
372 spin_unlock_irq(&irq_controller_lock);
374 /* now filter out any obviously spurious interrupts */
375 return irqs;
379 * Possible return values:
380 * >= 0 - interrupt number
381 * -1 - no interrupt/many interrupts
383 int probe_irq_off(unsigned long irqs)
385 unsigned int i;
386 int irq_found = -1;
389 * look at the interrupts, and find exactly one
390 * that we were probing has been triggered
392 spin_lock_irq(&irq_controller_lock);
393 for (i = 0; i < NR_IRQS; i++) {
394 if (irq_desc[i].probing &&
395 irq_desc[i].triggered) {
396 if (irq_found != -1) {
397 irq_found = NO_IRQ;
398 goto out;
400 irq_found = i;
404 if (irq_found == -1)
405 irq_found = NO_IRQ;
406 out:
407 spin_unlock_irq(&irq_controller_lock);
408 return irq_found;
412 * Get architecture specific interrupt handlers
413 * and interrupt initialisation.
415 #include <asm/arch/irq.h>
417 __initfunc(void init_IRQ(void))
419 extern void init_dma(void);
420 int irq;
422 for (irq = 0; irq < NR_IRQS; irq++) {
423 irq_desc[irq].mask_ack = dummy_mask_unmask_irq;
424 irq_desc[irq].mask = dummy_mask_unmask_irq;
425 irq_desc[irq].unmask = dummy_mask_unmask_irq;
428 irq_init_irq();
429 #ifdef CONFIG_ARCH_ACORN
430 init_FIQ();
431 #endif
432 init_dma();