/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX     q
#define LSUFFIX    q
#define SDATA_TYPE int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX     l
#define LSUFFIX    l
#define SDATA_TYPE int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX     w
#define LSUFFIX    uw
#define SDATA_TYPE int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX     b
#define LSUFFIX    ub
#define SDATA_TYPE int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
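
/*
 * For illustration (not part of the template proper): with DATA_SIZE == 2
 * on a 64-bit host and no SOFTMMU_CODE_ACCESS, the block above yields
 * WORD_TYPE = tcg_target_ulong (a 64-bit type), USUFFIX = uw and
 * SSUFFIX = sw, so the helpers return the 16-bit result already zero- or
 * sign-extended to the full host register.
 */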

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
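
/*
 * For illustration: io_readx/io_writex operate on values in target byte
 * order, while each helper implements one explicit byte order.  TGT_LE
 * and TGT_BE convert between the two, which is a byte swap exactly when
 * the orders differ; e.g. for a big-endian target, TGT_LE(X) is BSWAP(X)
 * while TGT_BE(X) is a no-op.
 */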

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
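
/*
 * For illustration, assuming the including file defines MMUSUFFIX as _mmu
 * (as cputlb.c does): with DATA_SIZE == 4 the macros above expand to
 * helper_le_ldul_mmu, helper_be_ldul_mmu, helper_le_stl_mmu and so on,
 * while with DATA_SIZE == 1 both endian variants collapse into the single
 * helper_ret_ldub_mmu / helper_ret_stb_mmu pair, since a one-byte access
 * has no byte order.
 */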

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr,
                                              bool recheck,
                                              MMUAccessType access_type)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
                    access_type, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
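
/*
 * Worked example of the little-endian combine above, assuming
 * DATA_SIZE == 4 and addr % 4 == 2: addr1 is the aligned word holding
 * bytes b0..b3 and addr2 the next word holding b4..b7; the access covers
 * b2..b5.  With shift == 16, (res1 >> 16) yields b2/b3 in the low half
 * and (res2 << 16) places b4/b5 in the high half.  Note that shift is
 * never 0 here: an access aligned to DATA_SIZE cannot cross a page
 * boundary, so the undefined (res2 << DATA_SIZE * 8) case is unreachable.
 */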

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
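
/*
 * Worked example of the big-endian combine above, again for
 * DATA_SIZE == 4 and addr % 4 == 2 (bytes b2..b5): res1 holds b0..b3
 * with b0 most significant, so (res1 << 16) moves b2/b3 into the high
 * half, and (res2 >> 16) brings b4/b5 into the low half.
 */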
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif /* DATA_SIZE * 8 < TCG_TARGET_REG_BITS */
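
/*
 * For illustration, assuming MMUSUFFIX is _mmu: with DATA_SIZE == 1 on a
 * 64-bit host, helper_le_lds_name is helper_ret_ldsb_mmu, and the cast
 * above returns e.g. the byte 0x80 as the tcg_target_ulong
 * 0xffffffffffffff80, already sign-extended to the full host register,
 * so TCG-generated code needs no separate extension op.
 */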

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr,
                                          bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                     recheck, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always access data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
                               retaddr, tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code on 64-bit Windows.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
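
/*
 * Worked example of the little-endian extract above, assuming
 * DATA_SIZE == 4 and val == 0x11223344: successive iterations store
 * val8 = 0x44, 0x33, 0x22, 0x11 at addr + 0..3, the byte image of a
 * little-endian store, regardless of host byte order.
 */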

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always access data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
                               tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
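
/*
 * Worked example of the big-endian extract above, assuming
 * DATA_SIZE == 4 and val == 0x11223344: the shift counts down from 24,
 * so the bytes 0x11, 0x22, 0x33, 0x44 land at addr + 0..3, the byte
 * image of a big-endian store.
 */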
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name