/*
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
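/*
 * Concretely: on a 64-bit host, a 2-byte load helper returns its uint16_t
 * result zero-extended into a 64-bit tcg_target_ulong, so TCG-generated
 * code never depends on the host ABI's rules for promoting narrow return
 * values.
 */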

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
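/*
 * For example, when cputlb.c instantiates this template with DATA_SIZE == 4
 * and MMUSUFFIX == _mmu, the #else branch yields helper_le_ldul_mmu,
 * helper_le_ldsl_mmu and helper_le_stl_mmu plus their _be_ counterparts;
 * byte accesses have no byte order, so DATA_SIZE == 1 maps both endian
 * variants to the same helper_ret_ldub_mmu/helper_ret_stb_mmu entry points.
 */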

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr,
                                              bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
                    DATA_SIZE);
}
#endif
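/*
 * io_readx() resolves the CPUIOTLBEntry to the MemoryRegion covering this
 * page and dispatches the access through the memory API.  The recheck
 * argument is true when the TLB entry carries TLB_RECHECK, i.e. the page
 * has MMU protection regions smaller than TARGET_PAGE_SIZE and the
 * translation must be re-validated on every access.
 */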

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
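    /*
     * a_bits encodes the MO_ALIGN requirement of the memop: an aligned
     * 4-byte access has a_bits == 2, so the mask (1 << 2) - 1 == 0x3
     * catches any address that is not a multiple of 4 and raises the
     * target's alignment fault before the TLB is even consulted.
     */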

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
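    /*
     * VICTIM_TLB_HIT probes the small victim TLB holding entries recently
     * evicted from the direct-mapped main TLB, swapping a matching entry
     * back in.  That spares a full tlb_fill, which may walk guest page
     * tables and can longjmp out to deliver a guest fault.
     */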

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }
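    /*
     * Worked example for the little-endian combine above, DATA_SIZE == 4
     * and addr % 4 == 1: addr1 is the aligned word holding the low three
     * useful bytes, addr2 the word holding the high byte, and shift == 8.
     * (res1 >> 8) drops the one byte below addr, (res2 << 24) puts the
     * byte from the second word at the top, and the OR yields exactly the
     * four bytes at addr..addr+3 as a little-endian value.
     */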

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
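    /*
     * The addend field is the precomputed difference between a guest
     * virtual address on this page and the host address backing it, so
     * the fast path costs a single add.  The ld*_p/ld*_le_p accessors
     * below are QEMU's plain host-memory loads and tolerate unaligned
     * host pointers.
     */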
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }
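    /*
     * Mirror of the little-endian case: for a 4-byte load at addr % 4 == 1,
     * shift == 8, so (res1 << 8) keeps the three useful bytes of the first
     * word at the top of the result and (res2 >> 24) brings in the final
     * byte from the second word at the bottom.
     */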

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
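/*
 * The cast does the sign extension: the unsigned helper's result is
 * truncated to SDATA_TYPE and then widened back to WORD_TYPE, so e.g. a
 * 16-bit load of 0xffff arrives in the host register as all ones.
 */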

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr,
                                          bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                     recheck, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write &
                   ~TLB_INVALID_MASK;
    }
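    /*
     * TLB_INVALID_MASK is masked off because a page mapped with
     * PAGE_WRITE_INV keeps its entry flagged invalid so that every store
     * refaults through tlb_fill; the fill above has just validated this
     * particular store, so the bit can be ignored for this one access.
     */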

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
                               retaddr, tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }
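    /*
     * Byte-by-byte example for the loop above, with DATA_SIZE == 4 and
     * val == 0x11223344: it stores 0x44 at addr, 0x33 at addr+1, 0x22 at
     * addr+2 and 0x11 at addr+3 (the little-endian image of val), each
     * byte store doing its own aligned, single-page TLB lookup.
     */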

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write &
                   ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
                               tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name