/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
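
/* For example, on a 64-bit host with DATA_SIZE == 1, WORD_TYPE is
   tcg_target_ulong: the byte load helpers return their result zero-extended
   to the full 64-bit register, so TCG-generated code does not have to
   re-extend it. */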
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
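
/* For example, with SHIFT == 2 (32-bit data) and an includer that defines
   MMUSUFFIX as _mmu, helper_le_ld_name expands to helper_le_ldul_mmu and
   helper_be_st_name expands to helper_be_stl_mmu. */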
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
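        /* For example, with DATA_SIZE == 4 and addr two bytes past the
           aligned address addr1 (shift == 16): the low two bytes of res
           come from the high two bytes of res1, and the high two bytes of
           res come from the low two bytes of res2. */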
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
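        /* For example, with DATA_SIZE == 4 and addr two bytes past addr1
           (shift == 16): the high two bytes of res come from the low two
           bytes of res1, and the low two bytes of res come from the high
           two bytes of res2, preserving big-endian byte order. */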
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
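
/* For example, with SHIFT == 1 and an includer that defines MMUSUFFIX as
   _mmu, helper_le_lds_name expands to helper_le_ldsw_mmu, which returns the
   16-bit value sign-extended to tcg_target_ulong. */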
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
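        /* For example, with DATA_SIZE == 4 and val == 0x11223344, the bytes
           stored at addr, addr + 1, addr + 2, addr + 3 are 0x44, 0x33, 0x22,
           0x11: least significant byte first, i.e. little-endian order. */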
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
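        /* For example, with DATA_SIZE == 4 and val == 0x11223344, the bytes
           stored at addr, addr + 1, addr + 2, addr + 3 are 0x11, 0x22, 0x33,
           0x44: most significant byte first, i.e. big-endian order. */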
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name