Complete irq tracing support for ARM
[linux-2.6/linux-2.6-openrd.git] / arch / arm / include / asm / assembler.h
blob 44912cd5da13b0ac151f46d27af72cff710278bd
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */

/* Refuse inclusion from C compilation units: the macros below are
 * assembler-only (.macro directives, bare mnemonics). */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>		/* PSR_I_BIT, SVC_MODE */
/*
 * Endian independent macros for shifting bytes within registers.
 *
 * "pull"/"push" name the shift direction that moves a byte towards /
 * away from the least significant end, and get_byte_N / put_byte_N
 * expand to a "shift-op #amount" operand pair selecting byte lane N
 * of a word regardless of memory byte order.
 */
#ifndef __ARMEB__
/* Little-endian: byte 0 is the least significant byte. */
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
/* Big-endian: byte 0 is the most significant byte, so every shift
 * direction/amount is mirrored relative to the little-endian case. */
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
/*
 * Data preload for architectures that support it.
 *
 * PLD(code) emits its argument only on ARMv5 and later (where the
 * "pld" prefetch instruction exists); on older cores it expands to
 * nothing, so callers can write unconditional PLD(...) lines.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 *
 * CALGN(code) emits its argument only when building for Feroceon;
 * elsewhere it expands to nothing.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable interrupts (untraced variants).
 *
 * ARMv6+ has the dedicated cpsid/cpsie instructions; earlier
 * architectures must rewrite the CPSR control field instead.  The
 * pre-v6 forms assume the CPU is executing in SVC mode (they write
 * SVC_MODE back into the mode bits) and leave FIQs enabled.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ mask IRQs, leave FIQs/mode alone
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ unmask IRQs
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE	@ set I bit, SVC mode
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE	@ clear I bit, SVC mode
	.endm
#endif
/*
 * Hardirq tracing hooks.  When CONFIG_TRACE_IRQFLAGS is off these
 * macros expand to nothing, so disable_irq/enable_irq cost the same
 * as their _notrace variants.  The caller-clobbered registers
 * (r0-r3, ip, lr) are saved around the C call into the tracer.
 */
	.macro	asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al	@ unconditional variant
	.endm

	/* Disable IRQs, then tell lockdep they are off. */
	.macro	disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	/* Tell lockdep IRQs are coming back on, then enable them.
	 * (Trace first so the tracer itself runs with IRQs masked.) */
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr		@ \oldcpsr = current PSR
	disable_irq			@ traced disable
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr	@ control field only: mode + I/F bits
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT	@ were IRQs enabled in the saved state?
	asm_trace_hardirqs_on_cond eq	@ if I bit clear, trace "irqs on" first
	restore_irqs_notrace \oldcpsr
	.endm
/*
 * USER(insn) tags a (potentially faulting) userspace access: it emits
 * the instruction at local label 9999 and records a __ex_table entry
 * mapping it to fixup label 9001, which the USING code must define
 * after the macro (the "9001f" forward reference is resolved there).
 */
#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.previous
/*
 * SMP data memory barrier.
 *
 * Expands to nothing on !SMP builds and on pre-v6 architectures
 * (which have no barrier support here).  On ARMv7 the native "dmb"
 * instruction is used; ARMv6 only has the CP15 barrier operation
 * (c7, c10, 5 — the r0 operand is an ignored should-be-zero value).
 *
 * BUG FIX: the ARMv7 branch was empty, so smp_dmb emitted no barrier
 * at all on v7 SMP kernels — the "dmb" instruction was missing.
 */
	.macro	smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	dmb				@ ARMv7 native data memory barrier
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb via CP15
#endif
#endif
	.endm