 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/*
 * NOTE(review): this chunk is extraction-mangled — each surviving line
 * carries a fused copy of its original line number, and the
 * #if DATA_SIZE == 8 / #elif ... / #else / #endif scaffolding that
 * selects exactly one of the definitions below has been dropped
 * (presumably along with SUFFIX/USUFFIX defines used by the functions
 * further down — verify against the pristine header).  Only comments
 * were added here; restore the dropped lines before compiling.
 *
 * Per-size type selection for this include-template: DATA_TYPE is the
 * unsigned access type for the instantiated width (8/4/2/1 bytes,
 * judging by the uint64_t..uint8_t choices) and DATA_STYPE the signed
 * counterpart used by the sign-extending loads below.
 */
22 #define DATA_TYPE uint64_t
26 #define DATA_TYPE uint32_t
30 #define DATA_TYPE uint16_t
31 #define DATA_STYPE int16_t
35 #define DATA_TYPE uint8_t
36 #define DATA_STYPE int8_t
38 #error unsupported data size
/*
 * ACCESS_TYPE decides which soft-MMU index and helper suffix the
 * generated accessors use:
 *   - ACCESS_TYPE < NB_MMU_MODES: the MMU index is hard-coded to
 *     ACCESS_TYPE at compile time (fastest form);
 *   - ACCESS_TYPE == NB_MMU_MODES: the index is queried at run time
 *     via cpu_mmu_index(env);
 *   - ACCESS_TYPE == NB_MMU_MODES + 1: code-fetch variant, which
 *     routes misses to the _cmmu helpers instead of _mmu.
 * NOTE(review): the #else/#endif lines that close this chain are among
 * the lines dropped by extraction (original numbering jumps 57→61).
 */
41 #if ACCESS_TYPE < (NB_MMU_MODES)
43 #define CPU_MMU_INDEX ACCESS_TYPE
44 #define MMUSUFFIX _mmu
46 #elif ACCESS_TYPE == (NB_MMU_MODES)
48 #define CPU_MMU_INDEX (cpu_mmu_index(env))
49 #define MMUSUFFIX _mmu
51 #elif ACCESS_TYPE == (NB_MMU_MODES + 1)
53 #define CPU_MMU_INDEX (cpu_mmu_index(env))
54 #define MMUSUFFIX _cmmu
57 #error invalid ACCESS_TYPE
/*
 * RES_TYPE is the C return/argument type of the generated accessors
 * (uint64_t shown here is the 8-byte branch; the alternative branch and
 * its #if/#else/#endif lines were dropped by extraction).
 * ADDR_READ picks which TLB-entry tag field the load functions compare
 * against: addr_code for the code-fetch instantiation
 * (ACCESS_TYPE == NB_MMU_MODES + 1), addr_read otherwise.  The #else
 * between the two defines is among the dropped lines (67→69).
 */
61 #define RES_TYPE uint64_t
66 #if ACCESS_TYPE == (NB_MMU_MODES + 1)
67 #define ADDR_READ addr_code
69 #define ADDR_READ addr_read
/*
 * Hand-written i386 inline-asm fast path for the unsigned load, enabled
 * only for accesses of at most 4 bytes on 32-bit-target builds with a
 * compile-time MMU index and ASM_SOFTMMU defined.
 * Visible structure: compute the TLB entry address into %edx, compare
 * the stored tag word against the access ("cmpl (%%edx), %%eax"), and on
 * a hit add the entry field at offset 12 (presumably the host 'addend' —
 * matches the C path's '.addend' below; confirm against upstream) and
 * load zero-extended.  The slow-path branch that calls the out-of-line
 * __ld*MMUSUFFIX helper is among the dropped lines.
 * NOTE(review): extraction dropped many lines here (original numbering
 * jumps 79→84, 85→93, etc.): the index/tag computation instructions, the
 * branch + call + labels, the function braces, and the output/"r"(ptr)
 * operand lines.  The fused leading numbers are also artifacts.  Do not
 * compile as-is.
 */
72 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
73 (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
75 static inline RES_TYPE
glue(glue(ld
, USUFFIX
), MEMSUFFIX
)(target_ulong ptr
)
79 asm volatile ("movl %1, %%edx\n"
/* %edx = &tlb_table[CPU_MMU_INDEX][index].addr_read, reached via %ebp
 * (presumably the env pointer lives in %ebp — TODO confirm). */
84 "leal %5(%%edx, %%ebp), %%edx\n"
85 "cmpl (%%edx), %%eax\n"
/* TLB hit: turn the guest address into a host address. */
93 "addl 12(%%edx), %%eax\n"
95 "movzbl (%%eax), %0\n"
97 "movzwl (%%eax), %0\n"
101 #error unsupported size
/* Immediate operands: TLB index mask, tag shift, page|alignment mask. */
106 "i" ((CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
),
107 "i" (TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
),
108 "i" (TARGET_PAGE_MASK
| (DATA_SIZE
- 1)),
/* Byte offset of the read-tag column of this MMU mode's TLB table. */
109 "m" (*(uint32_t *)offsetof(CPUState
, tlb_table
[CPU_MMU_INDEX
][0].addr_read
)),
/* Slow-path helper symbol (call target for the dropped miss branch). */
111 "m" (*(uint8_t *)&glue(glue(__ld
, SUFFIX
), MMUSUFFIX
))
112 : "%eax", "%ecx", "%edx", "memory", "cc");
/*
 * i386 inline-asm fast path for the sign-extending load: same TLB
 * tag-compare scheme as the unsigned variant above, but the hit path
 * uses movsbl/movswl to sign-extend into the int result.
 * NOTE(review): extraction-mangled like the block above — the index
 * computation, the miss branch/call/labels, braces, and the
 * output/"r"(ptr) operand lines are missing, and leading numbers are
 * fused artifacts.  The first "#error unsupported size" fragment (137)
 * belongs to a dropped #if chain distinct from the second one (147).
 */
117 static inline int glue(glue(lds
, SUFFIX
), MEMSUFFIX
)(target_ulong ptr
)
121 asm volatile ("movl %1, %%edx\n"
/* %edx = TLB entry address via %ebp; compare stored tag with access. */
126 "leal %5(%%edx, %%ebp), %%edx\n"
127 "cmpl (%%edx), %%eax\n"
137 #error unsupported size
/* TLB hit: add host addend (field at offset 12) and sign-extend. */
141 "addl 12(%%edx), %%eax\n"
143 "movsbl (%%eax), %0\n"
145 "movswl (%%eax), %0\n"
147 #error unsupported size
/* Immediate operands: TLB index mask, tag shift, page|alignment mask. */
152 "i" ((CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
),
153 "i" (TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
),
154 "i" (TARGET_PAGE_MASK
| (DATA_SIZE
- 1)),
/* Read-tag column offset for this MMU mode's TLB table. */
155 "m" (*(uint32_t *)offsetof(CPUState
, tlb_table
[CPU_MMU_INDEX
][0].addr_read
)),
/* Slow-path helper symbol for the (dropped) miss branch. */
157 "m" (*(uint8_t *)&glue(glue(__ld
, SUFFIX
), MMUSUFFIX
))
158 : "%eax", "%ecx", "%edx", "memory", "cc");
/*
 * i386 inline-asm fast path for the store: TLB lookup compares against
 * the write tag (note offsetof(..., addr_write) below, and the addend
 * here sits at offset 8 from that field rather than 12 — consistent
 * with addend being a fixed field after addr_write/addr_code; confirm
 * against the TLB entry layout upstream).  On a hit, movb/movw writes
 * the value through the translated host address.
 * NOTE(review): extraction dropped the index computation, the miss
 * branch that calls __st*MMUSUFFIX, labels, braces, and the operand
 * lines binding ptr/v; leading numbers are fused artifacts.
 */
163 static inline void glue(glue(st
, SUFFIX
), MEMSUFFIX
)(target_ulong ptr
, RES_TYPE v
)
165 asm volatile ("movl %0, %%edx\n"
/* %edx = TLB entry address via %ebp; compare stored write tag. */
170 "leal %5(%%edx, %%ebp), %%edx\n"
171 "cmpl (%%edx), %%eax\n"
/* Zero-extend the value into %edx (width-dependent branch, #if dropped). */
175 "movzbl %b1, %%edx\n"
177 "movzwl %w1, %%edx\n"
181 #error unsupported size
/* TLB hit: translate to host address and store. */
187 "addl 8(%%edx), %%eax\n"
189 "movb %b1, (%%eax)\n"
191 "movw %w1, (%%eax)\n"
195 #error unsupported size
/* Immediate operands: TLB index mask, tag shift, page|alignment mask. */
205 "i" ((CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
),
206 "i" (TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
),
207 "i" (TARGET_PAGE_MASK
| (DATA_SIZE
- 1)),
/* Write-tag column offset for this MMU mode's TLB table. */
208 "m" (*(uint32_t *)offsetof(CPUState
, tlb_table
[CPU_MMU_INDEX
][0].addr_write
)),
/* Slow-path store helper symbol for the (dropped) miss branch. */
210 "m" (*(uint8_t *)&glue(glue(__st
, SUFFIX
), MMUSUFFIX
))
211 : "%eax", "%ecx", "%edx", "memory", "cc");
216 /* generic load/store macros */
/*
 * Portable C unsigned load: index the soft TLB by page number, compare
 * the stored tag (ADDR_READ field) against
 * addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1)) — the low bits also catch
 * unaligned accesses — and on a mismatch call the out-of-line
 * __ld*MMUSUFFIX helper; on a hit, load directly through the host
 * address addr + entry.addend via the ld*_raw primitive.
 * NOTE(review): extraction dropped the opening brace, the declarations
 * of page_index/addr/res/mmu_idx, the 'else' between the two branches,
 * the closing braces and 'return res;'.  Leading numbers are fused
 * artifacts; restore from the pristine header before compiling.
 */
218 static inline RES_TYPE
glue(glue(ld
, USUFFIX
), MEMSUFFIX
)(target_ulong ptr
)
223 unsigned long physaddr
;
227 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
228 mmu_idx
= CPU_MMU_INDEX
;
/* TLB miss (tag mismatch): go through the slow-path MMU helper. */
229 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].ADDR_READ
!=
230 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
231 res
= glue(glue(__ld
, SUFFIX
), MMUSUFFIX
)(addr
, mmu_idx
);
/* TLB hit: direct host-memory load through the cached addend. */
233 physaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
234 res
= glue(glue(ld
, USUFFIX
), _raw
)((uint8_t *)physaddr
);
/*
 * Portable C sign-extending load: identical TLB hit/miss structure to
 * the unsigned load above, but the miss-path helper result is cast
 * through DATA_STYPE to sign-extend, and the hit path uses lds*_raw.
 * NOTE(review): extraction dropped the opening brace, the declarations
 * of page_index/addr/res/mmu_idx, the 'else', the closing braces and
 * 'return res;'.  Leading numbers are fused artifacts.
 */
240 static inline int glue(glue(lds
, SUFFIX
), MEMSUFFIX
)(target_ulong ptr
)
244 unsigned long physaddr
;
248 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
249 mmu_idx
= CPU_MMU_INDEX
;
/* TLB miss: slow-path helper, sign-extended via DATA_STYPE cast. */
250 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].ADDR_READ
!=
251 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
252 res
= (DATA_STYPE
)glue(glue(__ld
, SUFFIX
), MMUSUFFIX
)(addr
, mmu_idx
);
/* TLB hit: direct sign-extending host load through the addend. */
254 physaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
255 res
= glue(glue(lds
, SUFFIX
), _raw
)((uint8_t *)physaddr
);
/*
 * Generic C store — compiled out for the code-fetch instantiation
 * (ACCESS_TYPE == NB_MMU_MODES + 1), since code fetches never store.
 * Same TLB scheme as the loads, but the tag compared is addr_write and
 * the slow path forwards the value v to __st*MMUSUFFIX.
 * NOTE(review): extraction dropped the opening brace, the declarations
 * of page_index/addr/mmu_idx, the 'else' and the closing braces; leading
 * numbers are fused artifacts.
 */
261 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
263 /* generic store macro */
265 static inline void glue(glue(st
, SUFFIX
), MEMSUFFIX
)(target_ulong ptr
, RES_TYPE v
)
269 unsigned long physaddr
;
273 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
274 mmu_idx
= CPU_MMU_INDEX
;
/* TLB miss on the write tag: slow-path MMU store helper. */
275 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].addr_write
!=
276 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
277 glue(glue(__st
, SUFFIX
), MMUSUFFIX
)(addr
, v
, mmu_idx
);
/* TLB hit: direct host-memory store through the cached addend. */
279 physaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
280 glue(glue(st
, SUFFIX
), _raw
)((uint8_t *)physaddr
, v
);
284 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
/*
 * float64 load/store wrappers (the enclosing #if DATA_SIZE == 8 guard is
 * among the dropped lines): reinterpret the 64-bit integer loaded by
 * ldq*/stored by stq* as a float64 through a union 'u' whose declaration
 * (presumably { float64 d; uint64_t i; } — confirm upstream) was also
 * dropped by extraction, along with the function braces and returns.
 * Excluded from the code-fetch instantiation, which has no float access.
 */
288 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
291 static inline float64
glue(ldfq
, MEMSUFFIX
)(target_ulong ptr
)
/* Load the raw 64-bit pattern; the union converts it to float64. */
297 u
.i
= glue(ldq
, MEMSUFFIX
)(ptr
);
301 static inline void glue(stfq
, MEMSUFFIX
)(target_ulong ptr
, float64 v
)
/* Store v's raw 64-bit pattern via the integer store. */
308 glue(stq
, MEMSUFFIX
)(ptr
, u
.i
);
310 #endif /* DATA_SIZE == 8 */
/*
 * float32 load/store wrappers (the #if DATA_SIZE == 4 guard opening this
 * section was dropped by extraction): reinterpret the 32-bit value moved
 * by ldl*/stl* as a float32 through a union 'u' whose declaration,
 * the function braces and the returns were also dropped.
 */
313 static inline float32
glue(ldfl
, MEMSUFFIX
)(target_ulong ptr
)
/* Load the raw 32-bit pattern; the union converts it to float32. */
319 u
.i
= glue(ldl
, MEMSUFFIX
)(ptr
);
323 static inline void glue(stfl
, MEMSUFFIX
)(target_ulong ptr
, float32 v
)
/* Store v's raw 32-bit pattern via the integer store. */
330 glue(stl
, MEMSUFFIX
)(ptr
, u
.i
);
332 #endif /* DATA_SIZE == 4 */
334 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */