initial commit with v2.6.9
[linux-2.6.9-moxart.git] / include/linux/hardirq.h
blob 7e1aad3947e77698f33a3860da0bafb8dce68560
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/config.h>
#include <linux/smp_lock.h>
#include <asm/hardirq.h>
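
/*
 * preempt_count() packs three counters into a single word: the
 * preemption-disable depth, the softirq-disable depth and the
 * hardirq nesting level. PREEMPT_BITS/SOFTIRQ_BITS/HARDIRQ_BITS and
 * the matching *_SHIFT values come from <asm/hardirq.h> and are
 * chosen per architecture (commonly 8 bits per field, with PREEMPT
 * in the low bits, SOFTIRQ above it and HARDIRQ above that). The
 * masks below extract one such field from preempt_count().
 */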
#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
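
/*
 * Each *_OFFSET is the amount by which preempt_count() changes when
 * entering or leaving the corresponding context; irq_enter(), for
 * instance, adds HARDIRQ_OFFSET. The helpers below mask out a single
 * field, so a non-zero result means at least one level of nesting in
 * that context.
 */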
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
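
/*
 * in_atomic() is true when sleeping is not allowed. Under
 * CONFIG_PREEMPT the PREEMPT_ACTIVE flag (set while a task is being
 * preempted) is masked off, and the result is compared with
 * kernel_locked(): the Big Kernel Lock raises preempt_count() by one
 * but is dropped automatically across a sleep, so holding it alone
 * does not make the context atomic. preemptible() is true when the
 * current context may be preempted right now. IRQ_EXIT_OFFSET is
 * what irq_exit() subtracts; on preemptible kernels it is
 * HARDIRQ_OFFSET-1 so that preemption stays disabled by one level
 * until the final preempt_enable_no_resched().
 */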
#ifdef CONFIG_PREEMPT
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic()	(preempt_count() != 0)
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
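
/*
 * synchronize_irq() waits until no handler for the given IRQ line is
 * still executing. On SMP this needs a real implementation that can
 * watch the other CPUs; on UP nothing can run concurrently with us,
 * so a compiler barrier() is enough.
 */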
#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
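
/*
 * Illustrative sketch, not part of the original header: a common use
 * of these predicates is picking a non-sleeping allocation when the
 * caller may be reached from interrupt context, e.g.
 *
 *	buf = kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 */
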
#endif /* LINUX_HARDIRQ_H */