/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
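
/* Illustrative sketch of how this template is instantiated (the exact
 * instantiation sites live in the including translation unit, e.g.
 * cputlb.c in this era of QEMU):
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0              -- generates the byte (ldub/stb) helpers
 *     #include "softmmu_template.h"
 *     #define SHIFT 1              -- then the 16-bit helpers, and so on
 *     #include "softmmu_template.h"
 *
 * Each inclusion re-evaluates every macro below for the new SHIFT, which
 * is why everything is #undef'd at the bottom of this file.
 */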
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
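
/* Illustrative example: these macros convert between the target's native
 * byte order and an explicit little/big-endian representation.  With
 * DATA_SIZE == 4 on a big-endian target, TGT_LE(0x11223344) expands to
 * bswap32(0x11223344) == 0x44332211, while on a little-endian target it is
 * the identity, so the conversion costs nothing when the orders already
 * agree.
 */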
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
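
/* Example expansion (illustrative): with SHIFT == 2 (DATA_SIZE == 4) and
 * MMUSUFFIX defined as _mmu, helper_le_ld_name expands to
 * helper_le_ldul_mmu and helper_be_st_name to helper_be_stl_mmu.  The "te"
 * aliases simply select whichever of those matches the target's native
 * endianness.
 */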
/* macro to check the victim tlb */
#define VICTIM_TLB_HIT(ty)                                                    \
({                                                                            \
    /* we are about to do a page table walk. our last hope is the             \
     * victim tlb. try to refill from the victim tlb before walking the      \
     * page table. */                                                         \
    int vidx;                                                                 \
    CPUIOTLBEntry tmpiotlb;                                                   \
    CPUTLBEntry tmptlb;                                                       \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
            /* found entry in victim tlb, swap tlb and iotlb */               \
            tmptlb = env->tlb_table[mmu_idx][index];                          \
            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
            tmpiotlb = env->iotlb[mmu_idx][index];                            \
            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
            break;                                                            \
        }                                                                     \
    }                                                                         \
    /* return true when there is a vtlb hit, i.e. vidx >= 0 */                \
    vidx >= 0;                                                                \
})
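
/* Design note: VICTIM_TLB_HIT is a GNU statement expression whose value is
 * the final "vidx >= 0" test, so callers can use it directly in an if().
 * On a hit it swaps the victim entry with the conflicting entry in the
 * direct-mapped TLB, keeping the most recently used translation in the
 * fast table.  It relies on mmu_idx, index and addr being in scope at the
 * expansion site.
 */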

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif
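
/* Added commentary: mem_io_pc is recorded before the dispatch so that, if
 * the device access raises an exception, cpu_restore_state() can locate
 * the translation block that issued it; cpu_io_recompile() retranslates
 * when an instruction whose context cannot do I/O (can_do_io clear)
 * nevertheless reaches a device region.
 */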

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
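        /* Worked example (added): for a 4-byte little-endian load at
         * addr % 4 == 2, shift is 16; the two upper bytes of res1 (the
         * aligned word containing addr) become the low half of the result
         * and the two lower bytes of res2 supply the high half.
         */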
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
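        /* Worked example (added): the mirror image of the little-endian
         * case.  For a 4-byte big-endian load at addr % 4 == 2, shift is
         * 16; the low half of res1 moves up to form the high bytes of the
         * result and the high half of res2 fills in the low bytes.
         */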
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
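
/* Added note: the double conversion above is what performs the sign
 * extension.  The unsigned result is first truncated to SDATA_TYPE (e.g.
 * int16_t), then implicitly widened to WORD_TYPE for the return value, so
 * 0x8000 read as a signed 16-bit quantity comes back as 0xffff8000 on a
 * host with 32-bit registers.
 */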

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
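    /* Worked example (added) for the byte-store loop above: storing
     * 0x11223344 little-endian issues four byte stores, i = 3..0: 0x11 at
     * addr+3, 0x22 at addr+2, 0x33 at addr+1, 0x44 at addr.  Each byte
     * store may fault independently, which is why correctness depends on
     * tlb_fill() keeping the first page resident.
     */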
    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
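    /* Worked example (added), mirroring the little-endian loop: for a
     * 4-byte big-endian store of 0x11223344, i = 3 extracts val >> 0 ==
     * 0x44 for addr+3 and i = 0 extracts val >> 24 == 0x11 for addr, so
     * the most significant byte lands at the lowest address.
     */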
    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
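
/* Usage note (added): a target helper that must either complete a
 * multi-byte store in full or fault before writing anything can call
 * probe_write(env, addr, mmu_idx, GETPC()) up front; per the contract
 * above, any access fault is then raised before the first byte is
 * committed.
 */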

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef MMUSUFFIX

#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name