2 * arch/arm/include/asm/assembler.h
4 * Copyright (C) 1996-2000 Russell King
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This file contains arm architecture specific defines
11 * for the different processors.
13 * Do not include any C declarations in this file - it is included by
17 #error "Only include this from assembly code"
20 #include <asm/ptrace.h>
/*
 * Endian independent macros for shifting bytes within registers.
 *
 * get_byte_N extracts byte N of a word into bits 7:0 (use with a
 * following "and ..., #255"); put_byte_N moves bits 7:0 into byte N.
 * Byte 0 is the lowest-addressed byte, so the shifts differ between
 * little-endian and big-endian (__ARMEB__) builds.
 */
#ifndef __ARMEB__
/* little-endian: byte 0 is in the low bits of the register */
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
#define get_byte_3 lsr #24
#define put_byte_0 lsl #0
#define put_byte_1 lsl #8
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
/* big-endian: byte 0 is in the high bits of the register */
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
#define get_byte_3 lsl #0
#define put_byte_0 lsl #24
#define put_byte_1 lsl #16
#define put_byte_2 lsl #8
#define put_byte_3 lsl #0
#endif
/*
 * Data preload for architectures that support it (ARMv5 and later).
 * PLD(code) emits its argument only when the pld instruction exists;
 * on older architectures it expands to nothing.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when
 * WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable interrupts (IRQ only; FIQ state is untouched).
 * The _notrace variants do not call into the irqflags tracer.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ ARMv6+: mask IRQ via change-processor-state
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ ARMv6+: unmask IRQ
	.endm
#else
	.macro	disable_irq_notrace
	@ pre-v6: rewrite the CPSR control field; assumes SVC mode and
	@ FIQs enabled (F bit clear), as only the I bit is set here.
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE	@ clear I bit, stay in SVC mode
	.endm
#endif
94 .macro asm_trace_hardirqs_off
95 #if defined(CONFIG_TRACE_IRQFLAGS)
96 stmdb sp
!, {r0
-r3
, ip
, lr
}
98 ldmia sp
!, {r0
-r3
, ip
, lr
}
102 .macro asm_trace_hardirqs_on_cond
, cond
103 #if defined(CONFIG_TRACE_IRQFLAGS)
105 * actually the registers should be pushed and pop'd conditionally, but
106 * after bl the flags are certainly clobbered
108 stmdb sp
!, {r0
-r3
, ip
, lr
}
109 bl\cond trace_hardirqs_on
110 ldmia sp
!, {r0
-r3
, ip
, lr
}
114 .macro asm_trace_hardirqs_on
115 asm_trace_hardirqs_on_cond al
120 asm_trace_hardirqs_off
124 asm_trace_hardirqs_on
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 *
 * \oldcpsr receives the full CPSR prior to masking, suitable for a
 * later restore_irqs / restore_irqs_notrace.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 *
 * Only the CPSR control field is written, so mode and IRQ/FIQ mask
 * bits come back from \oldcpsr; condition flags are left alone.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
144 .macro restore_irqs
, oldcpsr
145 tst \oldcpsr
, #PSR_I_BIT
146 asm_trace_hardirqs_on_cond eq
147 restore_irqs_notrace \oldcpsr
152 .section __ex_table,"a"; \
158 * SMP data memory barrier
162 #if __LINUX_ARM_ARCH__ >= 7
164 #elif __LINUX_ARM_ARCH__ == 6
165 mcr p15
, 0, r0
, c7
, c10
, 5 @ dmb