/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
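
/* For example, on a 64-bit host the 2-byte load variant computes a
   uint16_t but hands it back zero-extended in a 64-bit tcg_target_ulong
   (with the conventional MMUSUFFIX of _mmu, that is helper_le_lduw_mmu). */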
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
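
/* Byte accesses have no byte order, so for DATA_SIZE == 1 the _be_ names
   above simply alias their _le_ counterparts, and all of them resolve to
   the single helper_ret_* entry points. */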
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr,
                                              bool recheck,
                                              MMUAccessType access_type)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
                    access_type, DATA_SIZE);
}
#endif
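
/* Load DATA_SIZE bytes, little-endian, on behalf of TCG-generated code:
   check alignment, refill the TLB on a miss, route I/O pages through
   io_read, split an access that crosses a page boundary, and otherwise
   load directly through the host address derived from the TLB addend. */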
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_LE(res);
        return res;
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
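        /* E.g. DATA_SIZE == 4 with addr & 3 == 1 gives shift == 8: the
           upper three bytes of res1 and the low byte of res2 together
           form the four bytes starting at addr, little-endian. */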
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_BE(res);
        return res;
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
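        /* E.g. DATA_SIZE == 4 with addr & 3 == 1 gives shift == 8: the
           lower three bytes of res1 and the top byte of res2 together
           form the four bytes starting at addr, big-endian. */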
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}
# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif /* DATA_SIZE * 8 < TCG_TARGET_REG_BITS */
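
/* Forward a store that hit an I/O page to the memory subsystem: look up
   the CPUIOTLBEntry that shadows this TLB slot and let io_writex do the
   actual device access. */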
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr,
                                          bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                     recheck, DATA_SIZE);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }
    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
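        /* The entry just (re)filled may carry TLB_INVALID_MASK, marking
           it as usable for this one store only; mask the flag off so the
           current access can proceed. */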
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write
                   & ~TLB_INVALID_MASK;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
                               retaddr, tlb_addr & TLB_RECHECK);
        return;
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
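        /* E.g. DATA_SIZE == 4: iteration i == 0 stores bits 0-7 of val at
           addr, i == 1 stores bits 8-15 at addr + 1, and so on, placing
           the least significant byte first as little-endian requires. */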
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }
    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write
                   & ~TLB_INVALID_MASK;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
                               tlb_addr & TLB_RECHECK);
        return;
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
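        /* E.g. DATA_SIZE == 4: iteration i == 0 stores bits 24-31 of val
           at addr, so the most significant byte lands at the lowest
           address, as big-endian requires. */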
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name