/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H
9 #include <linux/errno.h>
10 #include <linux/compiler.h>
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
14 extern void timer_interrupt(struct pt_regs
*);
19 static inline unsigned long local_get_flags(void)
23 __asm__
__volatile__("lbz %0,%1(13)"
25 : "i" (offsetof(struct paca_struct
, soft_enabled
)));
30 static inline unsigned long raw_local_irq_disable(void)
32 unsigned long flags
, zero
;
34 __asm__
__volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
35 : "=r" (flags
), "=&r" (zero
)
36 : "i" (offsetof(struct paca_struct
, soft_enabled
))
42 extern void raw_local_irq_restore(unsigned long);
43 extern void iseries_handle_interrupts(void);
/* Soft interrupt state helpers: a flags value of 0 means disabled. */
#define raw_local_irq_enable()		raw_local_irq_restore(1)
#define raw_local_save_flags(flags)	((flags) = local_get_flags())
#define raw_local_irq_save(flags)	((flags) = raw_local_irq_disable())

#define raw_irqs_disabled()		(local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)	((flags) == 0)

/* Hard enable/disable really toggle MSR[EE]. */
#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
/*
 * Fully disable interrupts: hard-disable MSR[EE] and record both the
 * soft and hard enabled state in the PACA as off.
 */
#define  hard_irq_disable()			\
	do {					\
		__hard_irq_disable();		\
		get_paca()->soft_enabled = 0;	\
		get_paca()->hard_enabled = 0;	\
	} while(0)
/* 64-bit: a saved flags value of 0 means interrupts were soft-disabled. */
static inline int irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)	mtmsr(x)
/* BookE can toggle MSR[EE] directly with wrtee. */
#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)	mtmsr(x)
#define local_irq_restore(flags)	mtmsr(flags)
#endif
77 static inline void local_irq_disable(void)
80 __asm__
__volatile__("wrteei 0": : :"memory");
85 SET_MSR_EE(msr
& ~MSR_EE
);
89 static inline void local_irq_enable(void)
92 __asm__
__volatile__("wrteei 1": : :"memory");
97 SET_MSR_EE(msr
| MSR_EE
);
101 static inline void local_irq_save_ptr(unsigned long *flags
)
107 __asm__
__volatile__("wrteei 0": : :"memory");
109 SET_MSR_EE(msr
& ~MSR_EE
);
#define local_save_flags(flags)	((flags) = mfmsr())
#define local_irq_save(flags)	local_irq_save_ptr(&flags)
#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)

/* 32-bit has no lazy/soft disable, so hard == normal. */
#define hard_irq_enable()	local_irq_enable()
#define hard_irq_disable()	local_irq_disable()
120 static inline int irqs_disabled_flags(unsigned long flags
)
122 return (flags
& MSR_EE
) == 0;
125 #endif /* CONFIG_PPC64 */
/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
133 #ifdef CONFIG_PERF_COUNTERS
136 static inline unsigned long test_perf_counter_pending(void)
140 asm volatile("lbz %0,%1(13)"
142 : "i" (offsetof(struct paca_struct
, perf_counter_pending
)));
146 static inline void set_perf_counter_pending(void)
148 asm volatile("stb %0,%1(13)" : :
150 "i" (offsetof(struct paca_struct
, perf_counter_pending
)));
153 static inline void clear_perf_counter_pending(void)
155 asm volatile("stb %0,%1(13)" : :
157 "i" (offsetof(struct paca_struct
, perf_counter_pending
)));
159 #endif /* CONFIG_PPC64 */
161 #else /* CONFIG_PERF_COUNTERS */
/* Stubs for !CONFIG_PERF_COUNTERS: never pending, setting is a no-op. */
static inline unsigned long test_perf_counter_pending(void)
{
	return 0;
}

static inline void set_perf_counter_pending(void) {}
/* Stub for !CONFIG_PERF_COUNTERS: nothing to clear. */
static inline void clear_perf_counter_pending(void) {}
169 #endif /* CONFIG_PERF_COUNTERS */
171 #endif /* __KERNEL__ */
172 #endif /* _ASM_POWERPC_HW_IRQ_H */