/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * tagged as global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}

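/* Example (illustrative sketch, not target code from this file): a target's
 * page-table-base helper would typically drop every cached translation after
 * the guest switches address spaces, e.g.
 *
 *     void hypothetical_helper_set_pt_base(CPUState *cs, target_ulong base)
 *     {
 *         ... update the base register in the CPU state ...
 *         tlb_flush(cs, 1);   // flush_global == 1: discard all entries
 *     }
 *
 * Per the note above, over-flushing is always safe; it only costs refills.
 */
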
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;

    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

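/* Example (illustrative, using hypothetical MMU index names): the variadic
 * MMU-index list is terminated by a negative value, so flushing two
 * translation regimes would look like
 *
 *     tlb_flush_by_mmuidx(cpu, MMU_IDX_KERNEL, MMU_IDX_USER, -1);
 *
 * where MMU_IDX_KERNEL and MMU_IDX_USER stand in for whatever indexes the
 * target actually defines.
 */
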
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

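/* Example (illustrative sketch): a target's invalidate-by-address operation
 * normally just forwards the guest virtual address here, e.g.
 *
 *     tlb_flush_page(cs, vaddr);
 *
 * masking to TARGET_PAGE_MASK beforehand is unnecessary since this function
 * aligns the address itself.
 */
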
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

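/* Worked example (assuming 4KiB target pages): after a 2MiB page is entered
 * at 0x40000000, tlb_flush_addr = 0x40000000 and tlb_flush_mask = ~0x1fffff.
 * Adding another 2MiB page at 0x40400000 widens the mask until both addresses
 * fall inside the same region, giving tlb_flush_addr = 0x40000000 and
 * tlb_flush_mask = ~0x7fffff, i.e. an 8MiB region that from then on forces a
 * full flush whenever any page inside it is invalidated.
 */
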
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

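/* Example (illustrative sketch): a target's tlb_fill() implementation usually
 * ends by installing the translation it just resolved, roughly
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * with vaddr/paddr/prot/mmu_idx coming from the target's page-table walk.
 */
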
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb. */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

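/* For example, VICTIM_TLB_HIT(addr_write, addr) expands to
 *   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, addr_write),
 *                  addr & TARGET_PAGE_MASK)
 * and relies on the caller's local env, mmu_idx and index variables.
 */
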
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

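/* Example (illustrative): a target helper that must not fault halfway through
 * a multi-word store can validate the destination up front, e.g.
 *
 *     probe_write(env, dest_addr, cpu_mmu_index(env, false), GETPC());
 *
 * and only then perform the stores, knowing a writable TLB entry exists for
 * that page.
 */
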
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment. */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop. */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world. */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page. */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world. */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

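/* The atomic helpers generated from atomic_template.h below obtain their host
 * pointer through the ATOMIC_MMU_LOOKUP macro (defined further down), which
 * expands to a call to atomic_mmu_lookup(); the returned pointer is then
 * operated on directly with the host's atomic primitives.
 */
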
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers. */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers. */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions. */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"