/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

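/* Note that with both gates left at 0 the branches above are dead code the
 * compiler can drop, while the format string and arguments of every
 * tlb_debug() call still get type-checked in normal builds.  A hypothetical
 * call such as tlb_debug("flush mmu_idx %d\n", mmu_idx) only produces output
 * once DEBUG_TLB (stderr) or DEBUG_TLB_LOG (the CPU_LOG_MMU log) is defined.
 */
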
/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * tagged as global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}

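/* Filling the tables with 0xff bytes sets every addr_read/addr_write/
 * addr_code field to -1, a value that can never match a page-aligned virtual
 * address, so every subsequent lookup misses and goes back through
 * tlb_fill().  The tb_jmp_cache is cleared with zeroes instead because it
 * holds pointers and NULL is its empty value.
 */
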
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    for (;;) {
        /* The variable argument list is a sequence of MMU indexes,
         * terminated by a negative value.  */
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;

    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

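/* Usage sketch, assuming the negative-terminated index list handled above:
 * tlb_flush_by_mmuidx(cpu, 0, 2, -1) would flush only the entries belonging
 * to MMU indexes 0 and 2 and leave the other modes' tables untouched.
 */
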
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

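/* Worked example of the direct-mapped index used above, assuming 4 KiB
 * target pages (TARGET_PAGE_BITS == 12) and a 256-entry TLB
 * (CPU_TLB_SIZE == 256): addr 0x40012345 is page-aligned to 0x40012000 and
 * (0x40012000 >> 12) & 0xff == 0x12, so only slot 0x12 of each mode's table
 * can hold that page and needs to be checked.
 */
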
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);
        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

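/* Both helpers operate on the DIRTY_MEMORY_CODE bitmap: clearing the bit in
 * tlb_protect_code() makes later stores to the page take the slow, notdirty
 * path where self-modifying code can be detected, while setting it again in
 * tlb_unprotect_code() lets ordinary fast-path stores resume.
 */
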
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned int i;
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

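/* TLB_NOTDIRTY in short: while a page is still clean its addr_write entry
 * carries the flag, so stores fall through to the slow path where the dirty
 * bitmap (and any translated code for the page) can be updated; once the
 * page has been dirtied, tlb_set_dirty() strips the flag so later stores hit
 * the fast path directly.
 */
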
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

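/* Worked example, assuming 2 MiB large pages: the first mapping at vaddr
 * 0x00200000 records tlb_flush_addr = 0x00200000, tlb_flush_mask =
 * 0xffe00000.  A second 2 MiB mapping at 0x00600000 differs in bit 22, so
 * the while loop shifts the mask until that bit falls outside it, leaving
 * tlb_flush_addr = 0x00000000 and tlb_flush_mask = 0xff800000, i.e. one
 * 8 MiB region covering both pages.
 */
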
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

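/* The flag bits stored in the low bits of the tlb addresses above all sit
 * below TARGET_PAGE_BITS: TLB_INVALID_MASK makes the page-masked comparison
 * fail so the entry gets refilled, while TLB_MMIO and TLB_NOTDIRTY survive
 * the comparison but are caught by the "tlb_addr & ~TARGET_PAGE_MASK" style
 * checks (see atomic_mmu_lookup below) and route the access to the I/O or
 * dirty-tracking slow path.
 */
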
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

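/* Recap of the address arithmetic above: te->addend was set up in
 * tlb_set_page_with_attrs() as host_ram_pointer - vaddr, so addr + addend
 * yields the host pointer backing the guest page, and
 * qemu_ram_addr_from_host_nofail() converts that host pointer into the
 * ram_addr_t returned as a tb_page_addr_t.
 */
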
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

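/* The macro deliberately relies on "env", "mmu_idx" and "index" already
 * being in scope at the call site, as in probe_write() below:
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
 *     }
 */
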
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

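/* cpu_loop_exit_atomic() leaves the cpu loop with EXCP_ATOMIC so that the
 * operation can be retried with all other vCPUs stopped (the
 * "stop-the-world" fallback referred to in the comments above); the
 * unaligned and RMW-on-write-only cases therefore never complete through
 * this fast path.
 */
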
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
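
/* Each DATA_SIZE/#include pair above instantiates the template helpers for
 * one access width (assumed here to follow the usual 1/2/4/8, plus 16-byte
 * under CONFIG_ATOMIC128, progression).  MMUSUFFIX (_mmu vs _cmmu) together
 * with SOFTMMU_CODE_ACCESS selects data-access vs code-access variants, and
 * the two ATOMIC_MMU_LOOKUP definitions generate one set of atomic helpers
 * that takes oi/retaddr explicitly and a second set callable directly from
 * TCG-generated code.
 */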