/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}
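
/* The variadic argument list must be terminated with a negative mmu_idx
 * (v_tlb_flush_by_mmuidx above breaks on the first negative value); for
 * example, to flush only MMU modes 0 and 1:
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */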

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}
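
/* The TLB proper is direct-mapped: for instance, with 4KiB pages
 * (TARGET_PAGE_BITS == 12) and a 256-entry TLB (CPU_TLB_SIZE == 256),
 * flushing addr == 0x12345678 only needs to touch index
 * (0x12345678 >> 12) & 0xff == 0x45 in each MMU mode.
 */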

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
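
/* For illustration (assuming a 32-bit target_ulong): if a 2MiB page was
 * recorded at 0x00200000 (tlb_flush_addr == 0x00200000, tlb_flush_mask ==
 * 0xffe00000) and a second 2MiB page is added at vaddr == 0x00600000, the
 * loop above widens the mask twice (0xffe00000 -> 0xff800000) until both
 * addresses share a region; the tracked range then covers [0, 0x00800000).
 */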

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
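
/* As a sketch of typical use (not taken from this file), a target's
 * tlb_fill() implementation would end with a call along the lines of:
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */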

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
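
/* For example, VICTIM_TLB_HIT(addr_write, addr) expands to
 *
 *     victim_tlb_hit(env, mmu_idx, index,
 *                    offsetof(CPUTLBEntry, addr_write),
 *                    (addr) & TARGET_PAGE_MASK)
 *
 * i.e. it compares each victim entry's addr_write field against the
 * page of addr.
 */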

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
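
/* Each inclusion above instantiates the load/store helpers for one access
 * size; e.g. DATA_SIZE 1 is expected to produce helper_ret_ldub_mmu() and
 * helper_ret_stb_mmu(), which TCG-generated code calls on a TLB miss.
 */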

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"