/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low, to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000
static int noirqdebug;
static volatile unsigned long irq_err_count;
static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip		= &bad_chip,
	.handle		= do_bad_IRQ,
	.pend		= LIST_HEAD_INIT(bad_irq_desc.pend),
	.disable_depth	= 1,
};
/**
 *	disable_irq - disable an irq
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily; the line is not actually
 *	masked until the next interrupt arrives.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq);
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
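/*
 * Illustrative sketch (not part of the original file): because enables
 * and disables nest, a driver must balance every disable_irq() with
 * exactly one enable_irq().  FOO_IRQ is a hypothetical IRQ number.
 *
 *	disable_irq(FOO_IRQ);	// depth 0 -> 1, line masked lazily
 *	disable_irq(FOO_IRQ);	// depth 1 -> 2
 *	enable_irq(FOO_IRQ);	// depth 2 -> 1, still disabled
 *	enable_irq(FOO_IRQ);	// depth 1 -> 0, line unmasked and any
 *				// pending interrupt queued for re-run
 */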
/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
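/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants its interrupt line to wake the system marks it before suspend.
 * If the chip provides no wake hook, the calls are silently ignored.
 * FOO_IRQ is hypothetical.
 *
 *	enable_irq_wake(FOO_IRQ);	// arm wake-up on this line
 *	... system suspends and resumes ...
 *	disable_irq_wake(FOO_IRQ);	// disarm after resume
 */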
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction *action;
	unsigned long flags;

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;

	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}
	show_regs(regs);
	dump_stack();
	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}
/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}
}
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;
		int ret;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if no action is installed.
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
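/*
 * Illustrative sketch (not part of the original file): a platform with
 * a cascaded GPIO interrupt controller typically installs a chained
 * handler on the parent IRQ, which demultiplexes and re-enters the
 * core through desc->handle.  All gpio_* names, GPIO_IRQ_BASE and
 * PARENT_IRQ below are hypothetical; only __set_irq_handler() and the
 * irqdesc layout come from this file.
 *
 *	static void gpio_demux_handler(unsigned int irq, struct irqdesc *desc,
 *				       struct pt_regs *regs)
 *	{
 *		unsigned int pending = gpio_read_pending();	// hypothetical
 *
 *		while (pending) {
 *			unsigned int gpio_irq = GPIO_IRQ_BASE + ffs(pending) - 1;
 *
 *			pending &= pending - 1;			// clear lowest bit
 *			desc = irq_desc + gpio_irq;
 *			desc->handle(gpio_irq, desc, regs);	// re-enter core
 *		}
 *	}
 *
 *	__set_irq_handler(PARENT_IRQ, gpio_demux_handler, 1);	// chained
 */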
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(set_irq_type);
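/*
 * Illustrative sketch (not part of the original file): platform or
 * driver code selects the trigger mode before requesting the IRQ.
 * FOO_IRQ is hypothetical; the IRQT_* trigger values come from the
 * ARM mach IRQ headers of this era.
 *
 *	if (set_irq_type(FOO_IRQ, IRQT_RISING) < 0)
 *		printk("FOO_IRQ: cannot set trigger\n");  // -ENXIO if the
 *							   // chip has no type hook
 */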
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
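/*
 * Illustrative sketch (not part of the original file): a machine's IRQ
 * init callback typically wires each interrupt up with the three
 * setters above.  my_board_init_irq and my_irq_chip are hypothetical;
 * do_level_IRQ, set_irq_chip, set_irq_flags and the IRQF_* flags are
 * the real API from this file, and set_irq_handler is the wrapper for
 * __set_irq_handler() from <asm/mach/irq.h>.
 *
 *	static void __init my_board_init_irq(void)
 *	{
 *		unsigned int irq;
 *
 *		for (irq = 0; irq < NR_IRQS; irq++) {
 *			set_irq_chip(irq, &my_irq_chip);	// hypothetical chip
 *			set_irq_handler(irq, do_level_IRQ);
 *			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 *		}
 *	}
 *
 * The machine description would then point init_arch_irq at
 * my_board_init_irq so that init_IRQ() calls it at boot.
 */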
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but is that really a problem?  Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling.  From the point this
 *	call is made your handler function may be invoked.  Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique.  Normally the address of the
 *	device data structure is used as the cookie.  Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		 unsigned long irq_flags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}
EXPORT_SYMBOL(request_irq);
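/*
 * Illustrative sketch (not part of the original file): a driver
 * claiming a shared, entropy-contributing interrupt with the 2.6-era
 * SA_* flags described above.  Everything prefixed foo_ is
 * hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_dev *dev = dev_id;	// cookie from request_irq()
 *
 *		if (!foo_irq_pending(dev))	// hypothetical status check
 *			return IRQ_NONE;	// not ours on a shared line
 *
 *		foo_clear_irq(dev);		// hypothetical ack
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, foo_interrupt,
 *			  SA_SHIRQ | SA_SAMPLE_RANDOM, "foo", dev);
 *	if (err)
 *		goto fail;			// -EINVAL, -EBUSY or -ENOMEM
 */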
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler.  The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		dump_stack();
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		dump_stack();
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}
EXPORT_SYMBOL(free_irq);
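/*
 * Illustrative counterpart to the request_irq() sketch above: the
 * dev_id cookie must match the one passed to request_irq(), otherwise
 * the action is not found and "already-free" is reported.  dev is
 * hypothetical.
 *
 *	free_irq(dev->irq, dev);
 */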
static DECLARE_MUTEX(probe_sem);

/*
 * Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}
EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one of the
	 * interrupts we were probing that has triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
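/*
 * Illustrative sketch (not part of the original file) of the classic
 * autoprobe sequence built on the two calls above.  foo_trigger_irq()
 * is a hypothetical way of making the device raise its interrupt.
 *
 *	unsigned long probe;
 *	int irq;
 *
 *	probe = probe_irq_on();		// enable probe-able, unclaimed IRQs
 *	foo_trigger_irq();		// make the device generate an IRQ
 *	mdelay(10);			// give it time to arrive
 *	irq = probe_irq_off(probe);	// NO_IRQ if none or several fired
 */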
void __init init_irq_proc(void)
{
}

void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}
static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);