tcg: remove global exit_request
[qemu/ar7.git] / cputlb.c
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* Flushing the whole TLB is OK because CPU architectures generally
 * permit an implementation to drop entries from the TLB at any time,
 * so flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

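/* Flush only the TLB entries of the given MMU indexes.  The variable
 * argument list is a sequence of int MMU indexes terminated by a negative
 * value (see the loop below), e.g. (illustrative indexes, not taken from
 * this file):
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */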
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

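/* Invalidate a single TLB entry, but only if it matches addr for any of
 * its read, write or code addresses (see the comparisons below).
 */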
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

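/* As tlb_flush_page(), but only for the MMU indexes passed in the
 * variable argument list, which is terminated by a negative index.
 */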
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

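/* A TLB entry maps plain, dirty RAM only when none of the INVALID, MMIO or
 * NOTDIRTY flags are set in its write address; the helpers below use this
 * to decide which entries get the TLB_NOTDIRTY slow-path flag.
 */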
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

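/* Worked example of the mask widening above (illustrative numbers,
 * assuming a 32-bit target_ulong and 2MB large pages):
 *
 *   1st page at 0x00200000: flush_addr = 0x00200000, flush_mask = 0xffe00000
 *   2nd page at 0x00600000: the two addresses differ in bit 22, so the mask
 *   is shifted left until that bit drops out, giving flush_addr = 0x00000000
 *   and flush_mask = 0xff800000, i.e. one aligned 8MB region covering both.
 */
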
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

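/* Note: target front ends typically call tlb_set_page() or
 * tlb_set_page_with_attrs() from their tlb_fill()/page-walk code once a
 * guest translation has been resolved; nothing in this file assumes any
 * particular caller.
 */
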
static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

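/* Perform an MMIO read on behalf of a guest load.  The iothread lock (BQL)
 * is taken around the dispatch only when mr->global_locking is set, as can
 * be seen below; io_writex() follows the same pattern for stores.
 */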
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

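/* Endian conversion helpers: the variant that matches the target's byte
 * order is the identity, the other one byte-swaps.
 */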
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

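/* The redefinitions below generate the same atomic helpers again, but in a
 * form callable directly from TCG-generated code: EXTRA_ARGS drops the
 * explicit retaddr and ATOMIC_MMU_LOOKUP recovers it via GETPC() instead.
 */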
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"