cputlb: add tlb_flush_by_mmuidx async routines
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* Use the macro parameter consistently so the assertion also works when the
 * caller's variable is not literally named "cpu".
 */
#define assert_cpu_is_self(this_cpu) do {                                 \
        if (DEBUG_TLB_GATE) {                                             \
            g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
        }                                                                 \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

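/* Note: cpu->pending_tlb_flush is used below as a bitmask of MMU indexes
 * that already have a flush queued on that vCPU; the value ALL_MMUIDX_BITS
 * doubles as "a full TLB flush is pending", so redundant async work is not
 * scheduled twice.
 */
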
/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

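/* Flushing the TLB of a remote vCPU is deferred to that vCPU via
 * async_run_on_cpu(), so a TLB is only ever modified by the thread that
 * owns it; pending_tlb_flush keeps a second full flush from being queued
 * while one is already outstanding.
 */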
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

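/* The mmuidx variants below pass the set of MMU indexes to flush as a
 * bitmask in run_on_cpu_data.host_int.
 */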
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tlb_debug("done\n");

    tb_unlock();
}

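/* Cross-vCPU requests are again deferred with async_run_on_cpu(); the
 * pending_tlb_flush bitmask is used to avoid queuing work for MMU indexes
 * that already have a flush outstanding on the target vCPU.
 */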
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

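/* Note: filling the entry with -1 above leaves TLB_INVALID_MASK set in every
 * comparator, so subsequent lookups on this slot always miss.
 */
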
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

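/* The "check" variant below also consults env->tlb_flush_addr/mask,
 * presumably so that the large-page bookkeeping is only ever read on the
 * vCPU that owns it; the public entry point therefore defers the check
 * into the async work rather than doing it inline.
 */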
static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_all(target_ulong addr)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    assert_cpu_is_self(cpu);

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }

    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }

    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

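/* Example (for illustration): inserting a 1 MB page at 0x00100000 records
 * tlb_flush_addr = 0x00100000 and tlb_flush_mask = 0xfff00000, so any
 * tlb_flush_page() whose address falls inside that megabyte degrades to a
 * full flush.
 */
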
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

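/* io_readx()/io_writex() dispatch MMIO accesses through the memory API.
 * Regions with global_locking set are protected by taking the iothread
 * mutex (BQL) around the dispatch; other regions are expected to do their
 * own locking.
 */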
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

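/* The victim TLB (tlb_v_table) is a small buffer of CPU_VTLB_SIZE entries
 * recently evicted from the direct-mapped main TLB (see
 * tlb_set_page_with_attrs above); it is searched linearly on a main-TLB
 * miss before falling back to a full tlb_fill().
 */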
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Notice an IO access, or a notdirty page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

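/* softmmu_template.h is included once per DATA_SIZE below; each inclusion
 * expands to the load/store helpers of that access width (the exact helper
 * names are generated inside the template from MMUSUFFIX and DATA_SIZE).
 */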
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

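/* The _cmmu variants are used for instruction fetches; GETPC() is forced to
 * 0 here, presumably so the fault path does not try to unwind guest state
 * from a host return address that a code fetch does not have.
 */
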
#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"