/* hardirq.h: PA-RISC hard IRQ support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
 */

#ifndef __PARISC_HARDIRQ_H
#define __PARISC_HARDIRQ_H

#include <linux/config.h>
#include <linux/threads.h>

typedef struct {
        unsigned int __softirq_active;
        unsigned int __softirq_mask;
        unsigned int __local_irq_count;
        unsigned int __local_bh_count;
        unsigned int __syscall_count;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>  /* Standard mappings for irq_cpustat_t above */
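
/*
 * Note (assumed expansion of <linux/irq_cpustat.h>, shown only for
 * illustration): the standard mappings turn these fields into per-CPU
 * accessors over an irq_stat[] array, roughly
 *
 *      #define local_irq_count(cpu)    (irq_stat[(cpu)].__local_irq_count)
 *      #define local_bh_count(cpu)     (irq_stat[(cpu)].__local_bh_count)
 *
 * which is what in_interrupt() and in_irq() below are built on.
 */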

/*
 * Are we in an interrupt context? Either doing bottom half
 * or hardware interrupt processing?
 */
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
        (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })

#define in_irq() ({ int __cpu = smp_processor_id(); \
        (local_irq_count(__cpu) != 0); })
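
/*
 * Illustrative sketch (not part of this header): callers commonly use
 * in_interrupt() to decide whether they may sleep.  Assuming the usual
 * kmalloc()/GFP_* interfaces, code that can run in either context might
 * do
 *
 *      void *p = kmalloc(size, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 *
 * in_irq() is the narrower test: it is non-zero only while a hardware
 * interrupt handler is running on this CPU.
 */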

#ifndef CONFIG_SMP

#define hardirq_trylock(cpu)    (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu)    do { } while (0)

#define irq_enter(cpu, irq)     (local_irq_count(cpu)++)
#define irq_exit(cpu, irq)      (local_irq_count(cpu)--)

#define synchronize_irq()       barrier()
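
/*
 * Illustrative sketch (assumed caller, not defined here): the architecture's
 * interrupt entry path brackets handler dispatch with irq_enter()/irq_exit(),
 * roughly
 *
 *      int cpu = smp_processor_id();
 *      irq_enter(cpu, irq);
 *      (run the handlers registered for irq)
 *      irq_exit(cpu, irq);
 *
 * On a uniprocessor kernel nothing runs concurrently with a handler, so
 * synchronize_irq() only has to be a compiler barrier().
 */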

#else

#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/smp.h>
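
/*
 * These globals back the 2.4-style "global IRQ lock" on SMP:
 * global_irq_lock and global_irq_holder implement the kernel-wide
 * cli()/sti() emulation, and global_irq_count counts how many CPUs are
 * currently executing hardware interrupt handlers.
 */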

extern unsigned char global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;

static inline void release_irqlock(int cpu)
{
        /* if we didn't own the irq lock, just ignore.. */
        if (global_irq_holder == (unsigned char) cpu) {
                global_irq_holder = NO_PROC_ID;
                spin_unlock(&global_irq_lock);
        }
}
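
/*
 * Illustrative sketch (assumed counterpart, not defined in this header):
 * the acquire side that release_irqlock() undoes is a get_irqlock()-style
 * helper in the arch irq code, which in simplified form does
 *
 *      spin_lock(&global_irq_lock);
 *      while (atomic_read(&global_irq_count))
 *              barrier();              (spin until in-flight handlers drain)
 *      global_irq_holder = (unsigned char) cpu;
 */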

static inline void irq_enter(int cpu)
{
        ++local_irq_count(cpu);
        atomic_inc(&global_irq_count);
}

static inline void irq_exit(int cpu)
{
        atomic_dec(&global_irq_count);
        --local_irq_count(cpu);
}

static inline int hardirq_trylock(int cpu)
{
        return (! atomic_read(&global_irq_count) &&
                ! spin_is_locked (&global_irq_lock));
}

#define hardirq_endlock(cpu)    do { } while (0)

extern void synchronize_irq(void);
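
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a shutdown path typically masks its interrupt source and then calls
 * synchronize_irq() so that a handler still running on another CPU has
 * finished before shared state is torn down:
 *
 *      mask_my_device_irq(dev);        (hypothetical helper)
 *      synchronize_irq();
 *      free_my_device_buffers(dev);    (hypothetical helper)
 */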

#endif /* CONFIG_SMP */

#endif /* __PARISC_HARDIRQ_H */