/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
irq_cpustat_t irq_stat [NR_CPUS];

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * Each architecture has to answer this themselves; it doesn't deserve
	 * a generic callback, I think.
	 */
#if CONFIG_X86
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
#if CONFIG_IA64
	printk("Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none
struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

volatile unsigned long irq_err_count;
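/*
 * For reference, a minimal sketch of what a real controller template can
 * look like (illustrative only; "my_pic" and its callbacks are hypothetical
 * names, not code from this file).  A platform fills in such a structure and
 * points irq_desc[irq].handler at it; a set_affinity hook may also be
 * provided, as used by the /proc code at the bottom of this file:
 *
 *	static struct hw_interrupt_type my_pic_irq_type = {
 *		"MY-PIC",
 *		my_pic_startup_irq,
 *		my_pic_shutdown_irq,
 *		my_pic_enable_irq,
 *		my_pic_disable_irq,
 *		my_pic_ack_irq,
 *		my_pic_end_irq
 *	};
 */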
/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	p += sprintf(p, " ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, " %s", action->name);

		for (action=action->next; action; action = action->next)
			p += sprintf(p, ", %s", action->name);
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ",
			nmi_counter(cpu_logical_map(j)));
	p += sprintf(p, "\n");
#if defined(CONFIG_SMP) && defined(__i386__)
	p += sprintf(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ",
			apic_timer_irqs[cpu_logical_map(j)]);
	p += sprintf(p, "\n");
#endif
	p += sprintf(p, "ERR: %10lu\n", irq_err_count);
	return p - buf;
}
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned int global_irq_holder = NO_PROC_ID;
volatile unsigned int global_irq_lock;

extern void show_stack(unsigned long* esp);

static void show(char * str)
{
	int i;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq: %d [",irqs_running());
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_irq_count(i));

	printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_bh_count(i));

	printk(" ]\nStack dumps:");
#if defined(__ia64__)
	/*
	 * We can't unwind the stack of another CPU without access to
	 * the registers of that CPU. And sending an IPI when we're
	 * in a potentially wedged state doesn't sound like a smart
	 * idea.
	 */
#elif defined(__i386__)
	for(i=0;i< smp_num_cpus;i++) {
		unsigned long esp;
		if(i==cpu)
			continue;
		printk("\nCPU %d:",i);
		esp = init_tss[i].esp0;
		if(esp==NULL) {
			/* tss->esp0 is set to NULL in cpu_init(),
			 * it's initialized when the cpu returns to user
			 * space. -- manfreds
			 */
			printk(" <unknown> ");
			continue;
		}
		esp &= ~(THREAD_SIZE-1);
		esp += sizeof(struct task_struct);
		show_stack((void*)esp);
	}
#else
	You lose...
#endif
	printk("\nCPU %d:",cpu);
	show_stack(NULL);
	printk("\n");
}
#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques,
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * i thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */

#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# ifdef __ia64__
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
# else
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
# endif
#endif
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}
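/*
 * Typical use, as described in the comment above (a sketch only, not code
 * from this file; "my_stop_hw" and "dev" are hypothetical driver names):
 *
 *	my_stop_hw(dev);		device no longer raises its irq
 *	synchronize_irq();		wait out handlers running on other CPUs
 *	kfree(dev->rx_buffer);		now safe to tear down driver state
 */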
static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if (cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

#ifdef __ia64__
	__save_flags(flags);
	if (flags & IA64_PSR_I) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
#else
	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
#endif
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
#ifdef __ia64__
	local_enabled = (flags & IA64_PSR_I) != 0;
#else
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
#endif
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}

#endif
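/*
 * The 0-3 flags encoding above is what lets the classic driver locking
 * sequence work unchanged on SMP builds, where save_flags()/cli()/
 * restore_flags() are expected to map onto the __global_* routines above
 * (a sketch under that assumption; "my_update_state" is a hypothetical
 * driver operation):
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);		records one of states 0-3
 *	cli();				global on SMP, local in irq context
 *	my_update_state(dev);
 *	restore_flags(flags);		puts back exactly the saved state
 */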
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void inline disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU..
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

#ifdef CONFIG_SMP
	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
#endif
}
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
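/*
 * disable_irq()/enable_irq() nest via desc->depth, so two independent
 * callers can safely bracket the same line (a sketch; "IRQ" stands for any
 * valid irq number):
 *
 *	disable_irq(IRQ);	depth 0 -> 1, line masked at the controller
 *	disable_irq(IRQ);	depth 1 -> 2, already masked
 *	enable_irq(IRQ);	depth 2 -> 1, still masked
 *	enable_irq(IRQ);	depth 1 -> 0, unmasked; a pending edge may be
 *				replayed via hw_resend_irq()
 *	enable_irq(IRQ);	depth already 0: "unbalanced" warning above
 */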
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		if (!(status & IRQ_PER_CPU))
			status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}
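/*
 * Typical driver usage of the above under this interface (a sketch only;
 * "my_interrupt", "my_dev", "dev" and MY_IRQ are hypothetical names):
 *
 *	static void my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		my_ack_hw(dev);
 *	}
 *
 *	if (request_irq(MY_IRQ, my_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM,
 *			"my_dev", dev))
 *		return -EBUSY;
 *
 *	and later, on teardown:
 *
 *	free_irq(MY_IRQ, dev);
 *
 * With SA_SHIRQ the dev_id must be unique per handler, since free_irq()
 * below uses it to find the right entry in the shared action chain.
 */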
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on another CPU */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < 16; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
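/*
 * The canonical autoprobe sequence built on the three routines above
 * (a sketch only; "my_trigger_irq" stands for whatever device operation
 * makes the hardware raise its interrupt once):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();		arm all unassigned irqs
 *	my_trigger_irq(dev);		make the device interrupt
 *	irq = probe_irq_off(mask);	> 0: the irq that fired, 0: none,
 *					< 0: several fired, -irq is the first
 */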
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~IRQ_DISABLED;
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };

#define HEX_DIGITS 8

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}
static unsigned int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a'-10; break;
		case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

#if CONFIG_SMP
	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;
#endif

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}
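/*
 * From userspace the mask arrives here through /proc (a sketch of a
 * hypothetical userspace helper, not kernel code; the 8-digit hex format
 * matches parse_hex_value() above):
 *
 *	#include <stdio.h>
 *
 *	int bind_irq_to_cpu0(int irq)
 *	{
 *		char path[64];
 *		FILE *f;
 *
 *		sprintf(path, "/proc/irq/%d/smp_affinity", irq);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fprintf(f, "00000001\n");	mask with only CPU 0 set
 *		return fclose(f);
 *	}
 */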
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type))
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	entry->nlink = 1;
	entry->data = (void *)(long)irq;
	entry->read_proc = irq_affinity_read_proc;
	entry->write_proc = irq_affinity_write_proc;

	smp_affinity_entry[irq] = entry;
}
unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].handler == &no_irq_type)
			continue;
		register_irq_proc(i);
	}
}