/* hardirq.h: 64-bit Sparc hard IRQ support.
 *
 * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_HARDIRQ_H
7 #define __SPARC64_HARDIRQ_H
9 #include <linux/config.h>
10 #include <linux/threads.h>
11 #include <linux/brlock.h>
12 #include <linux/spinlock.h>
#ifndef CONFIG_SMP
/* UP: a single global nesting counter tracks hard IRQ context. */
extern unsigned int __local_irq_count;
#define local_irq_count(cpu)	__local_irq_count
#define irq_enter(cpu, irq)	(__local_irq_count++)
#define irq_exit(cpu, irq)	(__local_irq_count--)
#else
/* SMP: the per-cpu big-reader lock count doubles as the IRQ count,
 * so entering an IRQ takes the brlock for reading. */
#define local_irq_count(cpu)	(__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu, irq)	br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu, irq)	br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif
26 * Are we in an interrupt context? Either doing bottom half
27 * or hardware interrupt processing? On any cpu?
29 #define in_interrupt() ((local_irq_count(smp_processor_id()) + \
30 local_bh_count(smp_processor_id())) != 0)
32 /* This tests only the local processors hw IRQ context disposition. */
33 #define in_irq() (local_irq_count(smp_processor_id()) != 0)
37 #define hardirq_trylock(cpu) ((void)(cpu), local_irq_count(smp_processor_id()) == 0)
38 #define hardirq_endlock(cpu) do { (void)(cpu); } while(0)
40 #define hardirq_enter(cpu) ((void)(cpu), local_irq_count(smp_processor_id())++)
41 #define hardirq_exit(cpu) ((void)(cpu), local_irq_count(smp_processor_id())--)
43 #define synchronize_irq() barrier()
45 #else /* (CONFIG_SMP) */
47 static __inline__
int irqs_running(void)
49 enum brlock_indices idx
= BR_GLOBALIRQ_LOCK
;
52 for (i
= 0; i
< smp_num_cpus
; i
++)
53 count
+= (__brlock_array
[cpu_logical_map(i
)][idx
] != 0);
58 extern unsigned char global_irq_holder
;
60 static inline void release_irqlock(int cpu
)
62 /* if we didn't own the irq lock, just ignore... */
63 if(global_irq_holder
== (unsigned char) cpu
) {
64 global_irq_holder
= NO_PROC_ID
;
65 br_write_unlock(BR_GLOBALIRQ_LOCK
);
69 static inline int hardirq_trylock(int cpu
)
71 spinlock_t
*lock
= &__br_write_locks
[BR_GLOBALIRQ_LOCK
].lock
;
73 return (!irqs_running() && !spin_is_locked(lock
));
76 #define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
78 extern void synchronize_irq(void);
80 #endif /* CONFIG_SMP */
82 #endif /* !(__SPARC64_HARDIRQ_H) */