/*
 *  Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif

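/* ACCESS_TYPE selects which TLB (MMU mode) the accessors below use:
 * 0 and 1 hard-code the index, 2 derives it from the current CPU
 * privilege state, and 3 is for code fetches, which go through the
 * _cmmu helpers and are matched against addr_code.
 */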
#if ACCESS_TYPE == 0

#define CPU_MEM_INDEX 0
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 1

#define CPU_MEM_INDEX 1
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 2

#ifdef TARGET_I386
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
#elif defined (TARGET_PPC)
#define CPU_MEM_INDEX (msr_pr)
#elif defined (TARGET_MIPS)
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#elif defined (TARGET_SH4)
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
#elif defined (TARGET_ALPHA)
#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
#else
#error unsupported CPU
#endif
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 3

#ifdef TARGET_I386
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
#elif defined (TARGET_PPC)
#define CPU_MEM_INDEX (msr_pr)
#elif defined (TARGET_MIPS)
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#elif defined (TARGET_SH4)
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
#elif defined (TARGET_ALPHA)
#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
#else
#error unsupported CPU
#endif
#define MMUSUFFIX _cmmu

#else
#error invalid ACCESS_TYPE
#endif

#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE int
#endif

#if ACCESS_TYPE == 3
#define ADDR_READ addr_code
#else
#define ADDR_READ addr_read
#endif

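/* Out-of-line slow-path handlers, instantiated per access size and MMU
 * suffix from softmmu_template.h; they are called whenever the inline
 * TLB compare below fails.
 */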
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                         int is_user);
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);

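/* The hand-written inline-assembly fast path is only used for 1/2/4-byte
 * accesses with 32-bit target addresses on an i386 host, with a fixed
 * MMU index (ACCESS_TYPE <= 1) and ASM_SOFTMMU enabled; every other
 * configuration uses the generic C versions further below.
 */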
#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
    (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)

#define CPU_TLB_ENTRY_BITS 4

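/* Unsigned load fast path: hash the address into the TLB, compare the
 * page tag against addr_read, then either load through the cached host
 * addend or call the __ld*_mmu slow path.
 */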
static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "pushl %6\n"
                  "call %7\n"
                  "popl %%edx\n"
                  "movl %%eax, %0\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movzbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movzwl (%%eax), %0\n"
#elif DATA_SIZE == 4
                  "movl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
                    "i" (CPU_MEM_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}

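/* Sign-extending variant of the fast path for 8- and 16-bit loads. */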
#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "pushl %6\n"
                  "call %7\n"
                  "popl %%edx\n"
#if DATA_SIZE == 1
                  "movsbl %%al, %0\n"
#elif DATA_SIZE == 2
                  "movswl %%ax, %0\n"
#else
#error unsupported size
#endif
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movsbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movswl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
                    "i" (CPU_MEM_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}
#endif

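/* Store fast path: the same TLB compare, but against addr_write; on a
 * hit the value is written through the translated host pointer, on a
 * miss the __st*_mmu slow path is called.
 */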
static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    asm volatile ("movl %0, %%edx\n"
                  "movl %0, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %0, %%eax\n"
                  "je 1f\n"
#if DATA_SIZE == 1
                  "movzbl %b1, %%edx\n"
#elif DATA_SIZE == 2
                  "movzwl %w1, %%edx\n"
#elif DATA_SIZE == 4
                  "movl %1, %%edx\n"
#else
#error unsupported size
#endif
                  "pushl %6\n"
                  "call %7\n"
                  "popl %%eax\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 8(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movb %b1, (%%eax)\n"
#elif DATA_SIZE == 2
                  "movw %w1, (%%eax)\n"
#elif DATA_SIZE == 4
                  "movl %1, (%%eax)\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  :
                  : "r" (ptr),
                    /* NOTE: 'q' would be needed as constraint, but we could not use it
                       with T1 ! */
                    "r" (v),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)),
                    "i" (CPU_MEM_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
}

#else

/* generic load/store macros */
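/* Each accessor hashes the virtual address into the per-mode TLB,
 * compares the page tag, and on a hit adds the cached addend to obtain
 * a host pointer; on a miss it falls back to the __ld/__st helpers.
 */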
static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int index;
    RES_TYPE res;
    target_ulong addr;
    unsigned long physaddr;
    int is_user;

    addr = ptr;
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    is_user = CPU_MEM_INDEX;
    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
                         (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
    } else {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
    }
    return res;
}

#if DATA_SIZE <= 2
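/* Sign-extending variant for 8- and 16-bit loads; the slow-path result
 * is cast through DATA_STYPE.
 */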
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res, index;
    target_ulong addr;
    unsigned long physaddr;
    int is_user;

    addr = ptr;
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    is_user = CPU_MEM_INDEX;
    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
                         (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
    } else {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
    }
    return res;
}
#endif

#if ACCESS_TYPE != 3

/* generic store macro */

static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    int index;
    target_ulong addr;
    unsigned long physaddr;
    int is_user;

    addr = ptr;
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    is_user = CPU_MEM_INDEX;
    if (__builtin_expect(env->tlb_table[is_user][index].addr_write !=
                         (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
    } else {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
    }
}

#endif /* ACCESS_TYPE != 3 */

#endif /* !asm */

#if ACCESS_TYPE != 3

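/* Float load/store: reinterpret the raw integer bits as float64/float32
 * through a union, without any numeric conversion.
 */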
#if DATA_SIZE == 8
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.i = glue(ldq, MEMSUFFIX)(ptr);
    return u.d;
}

static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.d = v;
    glue(stq, MEMSUFFIX)(ptr, u.i);
}
#endif /* DATA_SIZE == 8 */

#if DATA_SIZE == 4
static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = glue(ldl, MEMSUFFIX)(ptr);
    return u.f;
}

static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    glue(stl, MEMSUFFIX)(ptr, u.i);
}
#endif /* DATA_SIZE == 4 */

#endif /* ACCESS_TYPE != 3 */

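/* Undefine the per-instantiation macros so this header can be included
 * again with a different DATA_SIZE / ACCESS_TYPE.
 */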
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef CPU_MEM_INDEX
#undef MMUSUFFIX
#undef ADDR_READ