/*
 * Source: linux-2.6/history.git — include/asm-ia64/hardirq.h
 * (captured at commit "[ARM] Remove more 26-bit ARM support.",
 *  blob 6816b4181464c05a0435a5aa7026214fc8f079e3)
 */
1 #ifndef _ASM_IA64_HARDIRQ_H
2 #define _ASM_IA64_HARDIRQ_H
4 /*
5 * Copyright (C) 1998-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
9 #include <linux/config.h>
11 #include <linux/threads.h>
12 #include <linux/irq.h>
14 #include <asm/processor.h>
/*
 * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
 */
#define __ARCH_IRQ_STAT	1

/* Per-CPU softirq/ksoftirqd state lives in cpu_data(cpu), not irq_cpustat_t. */
#define softirq_pending(cpu)		(cpu_data(cpu)->softirq_pending)
#define syscall_count(cpu)		/* unused on IA-64 */
#define ksoftirqd_task(cpu)		(cpu_data(cpu)->ksoftirqd)
#define nmi_count(cpu)			0	/* no NMI counting on IA-64 */

/* Same accessors for the currently-running CPU (via local_cpu_data). */
#define local_softirq_pending()		(local_cpu_data->softirq_pending)
#define local_syscall_count()		/* unused on IA-64 */
#define local_ksoftirqd_task()		(local_cpu_data->ksoftirqd)
#define local_nmi_count()		0
/*
 * We put the hardirq and softirq counter into the preemption counter.  The bitmask has the
 * following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 * - bits 16-29 are the hardirq count (max # of hardirqs: 16384)
 *
 * - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x3fff0000
 */

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	14

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

/* Low-order bitmask of x bits; 1UL keeps the arithmetic in 64-bit unsigned. */
#define __MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)

/* Extract the individual fields from the combined preemption counter. */
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/* Increment step for each field (the value of its lowest bit). */
#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
/*
 * The hardirq mask has to be large enough to have space for potentially all IRQ sources
 * in the system nesting on a single CPU:
 */
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context?
 * Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

/* Legacy BH-lock interface: "lock" succeeds unless already in interrupt context. */
#define hardirq_trylock()	(!in_interrupt())
#define hardirq_endlock()	do { } while (0)

/* Entering hard-interrupt context: bump the hardirq field of preempt_count(). */
#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
#ifdef CONFIG_PREEMPT
# include <linux/smp_lock.h>
/* Atomic iff preempt_count() holds more than the BKL's own contribution. */
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
/*
 * Subtracting HARDIRQ_OFFSET-1 in irq_exit() leaves one preemption level
 * held, so preemption stays disabled until preempt_enable_no_resched().
 */
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic()	(preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
/*
 * Leave hard-interrupt context: drop the hardirq count, run any pending
 * softirqs if we are no longer nested inside another interrupt, then
 * re-enable preemption without triggering an immediate reschedule.
 */
#define irq_exit()							\
do {									\
	preempt_count() -= IRQ_EXIT_OFFSET;				\
	if (!in_interrupt() && local_softirq_pending())			\
		do_softirq();						\
	preempt_enable_no_resched();					\
} while (0)
108 #ifdef CONFIG_SMP
109 extern void synchronize_irq (unsigned int irq);
110 #else
111 # define synchronize_irq(irq) barrier()
112 #endif /* CONFIG_SMP */
114 #endif /* _ASM_IA64_HARDIRQ_H */