cputlb: add assert_cpu_is_self checks

/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do { \
    if (DEBUG_TLB_GATE) { \
        g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
    } \
} while (0)

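/*
 * TLB state is per-vCPU and is only safe to modify from the thread
 * that runs that vCPU.  assert_cpu_is_self() documents that invariant
 * and, when DEBUG_TLB is enabled, enforces it; the check is skipped
 * for vCPUs whose thread has not been created yet (e.g. during
 * machine setup).
 */
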
/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
void tlb_flush(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

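/*
 * Illustrative call (the MMU index values are target-specific, so 0
 * and 1 here are only placeholders):
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 *
 * flushes every entry for MMU indexes 0 and 1; the variable argument
 * list must be terminated with a negative value.
 */
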
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    assert_cpu_is_self(cpu);
    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

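/*
 * Worked example (illustrative, with made-up addresses): suppose the
 * tracked region is a 2MB page at 0x40000000 (mask 0xffe00000) and a
 * second 2MB page at 0x40400000 is added.  The loop above widens the
 * mask until both addresses match under it, ending at 0xff800000, so
 * the tracked region becomes 0x40000000-0x407fffff and a
 * tlb_flush_page() on any address in that range falls back to a full
 * flush.
 */
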
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

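/*
 * Rough summary of the low bits set in addr_read/addr_write/addr_code
 * above: TLB_MMIO routes the access through the I/O path, TLB_NOTDIRTY
 * forces writes through the slow path so dirty tracking and
 * self-modifying-code detection can run, and TLB_INVALID_MASK marks an
 * entry that can never match.  A fast-path hit therefore requires the
 * page-aligned address to compare equal with all of these bits clear.
 */
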
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

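/*
 * Typical use from a slow-path helper (this mirrors probe_write() and
 * atomic_mmu_lookup() below): try the victim TLB before taking a full
 * tlb_fill():
 *
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
 *     }
 */
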
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

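/*
 * Alignment check example (illustrative): for a 4-byte atomic access,
 * s_bits == MO_32 == 2, so the "qemu required alignment" test above is
 * addr & ((1 << 2) - 1), i.e. addr & 3; any address that is not a
 * multiple of 4 takes the stop_the_world path unless the guest's own
 * alignment trap fired first via cpu_unaligned_access().
 */
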
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

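/*
 * Each inclusion of softmmu_template.h above expands into the softmmu
 * load/store helpers for one access size (1, 2, 4 or 8 bytes); the
 * template consumes DATA_SIZE and undefines it again so it can be
 * redefined for the next inclusion.  The atomic_template.h inclusions
 * below follow the same pattern.
 */
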
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"