/*
 * linux/arch/m32r/kernel/irq.c
 *
 * Copyright (c) 2003, 2004  Hitoshi Yamamoto
 *
 * Taken from i386 2.6.4 version.
 */

/*
 * linux/arch/i386/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/irq.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing.  Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller.  Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
        /*
         * 'what should we do if we get a hw irq event on an illegal vector'.
         * each architecture has to answer this themselves, it doesn't deserve
         * a generic callback i think.
         */
        printk("unexpected IRQ trap at vector %02x\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
        "none",
        startup_none,
        shutdown_none,
        enable_none,
        disable_none,
        ack_none,
        end_none
};

atomic_t irq_err_count;
atomic_t irq_mis_count;

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
        }
        return 0;
}

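/*
 * Illustrative /proc/interrupts output produced by the code above
 * (schematic only, not from the original file; the IRQ numbers, counts
 * and names are hypothetical, uniprocessor layout shown):
 *
 *            CPU0
 *   4:     123456        M32R-IRQ  serial
 *  17:       7890        M32R-IRQ  eth0
 * ERR:          0
 * MIS:          0
 */
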
#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
        while (irq_desc[irq].status & IRQ_INPROGRESS)
                cpu_relax();
}
#endif

/*
 * This should really return information about whether
 * we should do bottom half handling etc.  Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq,
                struct pt_regs *regs, struct irqaction *action)
{
        int status = 1; /* Force the "do bottom halves" bit */
        int ret, retval = 0;

        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();

        do {
                ret = action->handler(irq, action->dev_id, regs);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                action = action->next;
                retval |= ret;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();
        return retval;
}

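/*
 * Example handler (a sketch, not part of the original file) showing the
 * return-value convention consumed by the loop above: a handler on a
 * shared line must report whether its device really raised the
 * interrupt, since the OR-ed result feeds note_interrupt()'s stuck-IRQ
 * detection.  All "mydev" names are hypothetical.
 */
#if 0   /* illustration only */
static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct mydev *dev = dev_id;

        if (!mydev_irq_pending(dev))    /* shared line: not our device? */
                return IRQ_NONE;

        mydev_clear_irq(dev);           /* ack the interrupt on the device */
        return IRQ_HANDLED;
}
#endif
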
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        struct irqaction *action;

        if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared!\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");
        action = desc->action;
        do {
                printk(KERN_ERR "[<%p>]", action->handler);
                print_symbol(" (%s)",
                        (unsigned long)action->handler);
                printk("\n");
                action = action->next;
        } while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count) {
                count--;
                __report_bad_irq(irq, desc, action_ret);
        }
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk("IRQ lockup detection disabled\n");
        return 1;
}

__setup("noirqdebug", noirqdebug_setup);

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one.)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        if (action_ret != IRQ_HANDLED) {
                desc->irqs_unhandled++;
                if (action_ret != IRQ_NONE)
                        report_bad_irq(irq, desc, action_ret);
        }

        desc->irq_count++;
        if (desc->irq_count < 100000)
                return;

        desc->irq_count = 0;
        if (desc->irqs_unhandled > 99900) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(irq, desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        desc->irqs_unhandled = 0;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Disables and Enables are
 *      nested.
 *      Unlike disable_irq(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 *
 *      This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Enables and Disables are
 *      nested.
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning.  If you use this function while
 *      holding a resource the IRQ handler may need, you will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}

/**
 *      enable_irq - enable handling of an irq
 *      @irq: Interrupt to enable
 *
 *      Undoes the effect of one call to disable_irq().  If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 *
 *      This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;

                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler, irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk("enable_irq(%u) unbalanced from %p\n", irq,
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

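/*
 * Usage sketch (not part of the original file): disables nest via
 * desc->depth, so calls must be paired.  MYDEV_IRQ is a hypothetical
 * line number.
 */
#if 0   /* illustration only */
        disable_irq(MYDEV_IRQ);  /* depth 0 -> 1: masks the line, waits for handlers */
        disable_irq(MYDEV_IRQ);  /* depth 1 -> 2: already masked */
        enable_irq(MYDEV_IRQ);   /* depth 2 -> 1: line stays masked */
        enable_irq(MYDEV_IRQ);   /* depth 1 -> 0: unmasked; a pending IRQ is replayed */
#endif
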
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * A 0 return value means that this irq is already being
         * handled by some other CPU (or is disabled).
         */
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction * action;
        unsigned int status;

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* FIXME M32R */
#endif
        kstat_this_cpu.irqs[irq]++;
        spin_lock(&desc->lock);
        desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler.  But the code here only handles the _second_
         * instance of the irq, not the third or fourth.  So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, regs, action);
                spin_lock(&desc->lock);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        irq_exit();

#if defined(CONFIG_SMP)
        if (irq == M32R_IRQ_MFT2)
                smp_send_timer();
#endif  /* CONFIG_SMP */

        return 1;
}

int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irqaction *action;

        if (irq >= NR_IRQS)
                return 0;
        action = irq_desc[irq].action;
        if (action) {
                if (irqflags & action->flags & SA_SHIRQ)
                        action = NULL;
        }
        return !action;
}

/**
 *      request_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs
 *      @irqflags: Interrupt type flags
 *      @devname: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling.  From the point this
 *      call is made your handler function may be invoked.  Since
 *      your handler function must clear any interrupt the board
 *      raises, you must take care both to initialise your hardware
 *      and to set up the interrupt handler in the right order.
 *
 *      Dev_id must be globally unique.  Normally the address of the
 *      device data structure is used as the cookie.  Since the handler
 *      receives this value it makes sense to use it.
 *
 *      If your interrupt is shared you must pass a non NULL dev_id
 *      as this is required when freeing the interrupt.
 *
 *      Flags:
 *
 *      SA_SHIRQ                Interrupt is shared
 *
 *      SA_INTERRUPT            Disable local interrupts while processing
 *
 *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if (irqflags & SA_SHIRQ) {
                if (!dev_id)
                        printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
                               devname, (&irq)[-1]);
        }
#endif

        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}

EXPORT_SYMBOL(request_irq);

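/*
 * Usage sketch (not part of the original file): a typical probe path
 * registering the example handler shown earlier on a shared line.
 * Everything prefixed "mydev" is hypothetical.
 */
#if 0   /* illustration only */
static int mydev_probe(struct mydev *dev)
{
        int ret;

        /* SA_SHIRQ demands a unique, non-NULL dev_id (needed again at free time). */
        ret = request_irq(dev->irq, mydev_interrupt, SA_SHIRQ, "mydev", dev);
        if (ret)
                return ret;     /* -EINVAL, -EBUSY or -ENOMEM from the code above */

        mydev_unmask_hw_irq(dev);       /* let the device raise the line last */
        return 0;
}
#endif
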
/**
 *      free_irq - free an interrupt
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove an interrupt handler.  The handler is removed and if the
 *      interrupt line is no longer in use by any driver it is disabled.
 *      On a shared IRQ the caller must ensure the interrupt is disabled
 *      on the card it drives before calling this function.  The function
 *      does not return until any executing interrupts for this IRQ
 *      have completed.
 *
 *      This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock, flags);

                        /* Wait to make sure it's not being used on another CPU */
                        synchronize_irq(irq);
                        kfree(action);
                        return;
                }
                printk("Trying to free free IRQ%d\n", irq);
                spin_unlock_irqrestore(&desc->lock, flags);
                return;
        }
}

EXPORT_SYMBOL(free_irq);

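/*
 * Teardown sketch (not part of the original file), following the
 * ordering the comment above requires: quiesce the card first, then
 * free with the same dev_id cookie passed to request_irq().  "mydev"
 * names are hypothetical.
 */
#if 0   /* illustration only */
static void mydev_remove(struct mydev *dev)
{
        mydev_mask_hw_irq(dev);         /* stop the card raising the line */
        free_irq(dev->irq, dev);        /* returns only after running handlers finish */
}
#endif
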
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *      probe_irq_on - begin an interrupt autodetect
 *
 *      Commence probing for an interrupt.  The interrupts are scanned
 *      and a mask of potential interrupt lines is returned.
 *
 */
unsigned long probe_irq_on(void)
{
        unsigned int i;
        irq_desc_t *desc;
        unsigned long val;
        unsigned long delay;

        down(&probe_sem);
        /*
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious.
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!irq_desc[i].action)
                        irq_desc[i].handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ barrier();

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ barrier();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else
                                if (i < 32)
                                        val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *      probe_irq_mask - scan a bitmap of interrupt lines
 *      @val: mask of interrupts to consider
 *
 *      Scan the ISA bus interrupt lines and return a bitmap of
 *      active interrupts.  The interrupt probe logic state is then
 *      returned to its previous value.
 *
 *      Note: we need to scan all the irq's even though we will
 *      only return ISA irq numbers - just so that we reset them
 *      all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (i < 16 && !(status & IRQ_WAITING))
                                mask |= 1 << i;

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *      probe_irq_off - end an interrupt autodetect
 *      @val: mask of potential interrupts (unused)
 *
 *      Scans the unused interrupt lines and returns the line which
 *      appears to have triggered the interrupt.  If no interrupt was
 *      found then zero is returned.  If more than one interrupt is
 *      found then minus the first candidate is returned to indicate
 *      that there is doubt.
 *
 *      The interrupt probe logic state is returned to its previous
 *      value.
 *
 *      BUGS: When used in a module (which arguably shouldn't happen)
 *      nothing prevents two IRQ probe callers from overlapping.  The
 *      results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);

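/*
 * Autoprobe sketch (not part of the original file): the canonical
 * on/trigger/off sequence built from the two calls above.  The "mydev"
 * helpers are hypothetical.
 */
#if 0   /* illustration only */
static int mydev_find_irq(struct mydev *dev)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();          /* arm all unassigned lines */
        mydev_force_interrupt(dev);     /* make the hardware pulse its IRQ line */
        udelay(100);                    /* give the interrupt time to arrive */
        irq = probe_irq_off(mask);      /* 0: nothing seen, <0: several candidates */

        return irq > 0 ? irq : -ENODEV;
}
#endif
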
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        unsigned long flags;
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_desc + irq;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem?
                 * Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock, flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);

        register_irq_proc(irq);
        return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static int irq_affinity_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
{
        int irq = (long)data, full_count = count, err;
        cpumask_t new_value, tmp;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-)  At least
         * one online CPU still has to be targeted.
         */
        cpus_and(tmp, new_value, cpu_online_map);
        if (cpus_empty(tmp))
                return -EINVAL;

        irq_affinity[irq] = new_value;
        irq_desc[irq].handler->set_affinity(irq,
                                cpumask_of_cpu(first_cpu(new_value)));

        return full_count;
}

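/*
 * Example (not part of the original file): the write handler above is
 * driven from user space with a hex CPU mask.  For instance, steering a
 * hypothetical IRQ 4 to CPU1 only:
 *
 *      echo 2 > /proc/irq/4/smp_affinity
 */
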
#endif

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
{
        cpumask_t *mask = (cpumask_t *)data;
        unsigned long full_count = count, err;
        cpumask_t new_value;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
                        irq_dir[irq])
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
        {
                struct proc_dir_entry *entry;

                /* create /proc/irq/1234/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

                if (entry) {
                        entry->nlink = 1;
                        entry->data = (void *)(long)irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }

                smp_affinity_entry[irq] = entry;
        }
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
        struct proc_dir_entry *entry;
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);

        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

        if (!entry)
                return;

        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < NR_IRQS; i++)
                register_irq_proc(i);
}