/*
 * linux/include/asm-arm/proc-armo/locks.h
 *
 * Copyright (C) 2000 Russell King
 * Fixes for 26 bit machines, (C) 2000 Dave Gilbert
 *
 * Interrupt safe locking assembler.
 */
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
/* Decrements by 1, fails if value < 0 */
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__ ( \
"@ atomic down operation\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " SYMBOL_NAME_STR(fail) \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
#define __down_op_ret(ptr,fail) \
({ \
unsigned int result; \
__asm__ __volatile__ ( \
" @ down_op_ret\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%1]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%1]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " SYMBOL_NAME_STR(fail) "\n" \
" mov %0, ip" \
: "=&r" (result) \
: "r" (ptr) \
: "ip", "lr", "cc"); \
result; \
})
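/*
 * __down_op_ret is the same decrement, but as a GCC statement expression
 * it yields a value: 0 when the count stayed non-negative (movpl ip, #0),
 * otherwise whatever the out-of-line failure routine leaves in ip, which
 * is presumably how the interruptible or trylock style callers learn the
 * outcome.  A rough C approximation, with down_ret_sketch and fail as
 * invented names:
 */
static inline unsigned int down_ret_sketch(volatile int *count,
                                           unsigned int (*fail)(volatile int *))
{
        int newval = *count - 1;        /* done with IRQs off in the macro */

        *count = newval;
        return (newval < 0) ? fail(count) : 0;  /* 0 on the fast path */
}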
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__ ( \
"@ up_op\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrle ip, ip, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " SYMBOL_NAME_STR(wake) \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
/*
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that sub'ing
 * BIAS once per CPU will result in the long remaining
 * negative.
 */
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
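/*
 * As a worked check of the figures above: 128 CPUs each subtracting
 * 0x01000000 remove 128 * 0x01000000 = 0x80000000 from the counter,
 * which still keeps a 32 bit signed long in the negative range rather
 * than wrapping back to a positive value, and the low 24 bits of the
 * bias leave room for up to 0x00ffffff concurrent readers, since each
 * read lock only subtracts 1 (see __down_op_read below).
 */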
/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" and ip, ip, #0x0c000003\n" \
" ldr lr, [%0]\n" \
" subs lr, lr, %1\n" \
" str lr, [%0]\n" \
" orreq ip, ip, #0x40000000 @ set Z \n"\
" teqp ip, #0\n" \
" movne ip, %0\n" \
" blne " SYMBOL_NAME_STR(fail) \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_write\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
" orrcs ip, ip, #0x20000000 @ set C\n" \
" teqp ip, #0\n" \
" movcs ip, %0\n" \
" blcs " SYMBOL_NAME_STR(wake) \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
#define __down_op_read(ptr,fail) \
	__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
" orreq ip, ip, #0x40000000 @ set Z \n" \
" teqp ip, #0\n" \
" moveq ip, %0\n" \
" bleq " SYMBOL_NAME_STR(wake) \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc"); \
})
#endif