/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
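
/* Worked example (illustrative, not from the original source): if
   L1_MAP_ADDR_SPACE_BITS were 64 and TARGET_PAGE_BITS 12, then
   64 - 12 = 52 bits remain to be mapped.  52 % 10 = 2, which is < 4,
   so V_L1_BITS = 2 + 10 = 12 and V_L1_SHIFT = 64 - 12 - 12 = 40: the
   lookup resolves 12 bits at level 1 followed by four 10-bit levels
   (three intermediate tables plus the bottom PageDesc table).  */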

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
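
/* Example (illustrative, not in the original): with a 4 KiB host page,
   qemu_host_page_size is 0x1000 and qemu_host_page_mask is ~0xfff, so
   "addr & qemu_host_page_mask" rounds an address down to its host page
   base, as done in tb_alloc_page() and page_unprotect() below.  */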

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}
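
/* Illustrative lookup (not from the original source): continuing the
   example above with V_L1_SHIFT = 40, a page index consumes bits
   [51:40] at level 1, then bits [39:30], [29:20] and [19:10] in the
   loop (i = 3, 2, 1), and the final "index & (V_L2_SIZE - 1)" picks
   one of the 1024 PageDesc entries in the bottom-level table.  */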

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
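
/* Example (illustrative, not in the original): a buffer at 0x0fff0000
   of size 0x20000 ends at 0x10010000; the XOR of those two addresses
   has bits set inside the 0xf0000000 mask, so the buffer crosses a
   256MB boundary.  A buffer of the same size starting at 0x10000000
   XORs to 0x00020000, which the mask clears, so it does not cross.  */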

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            g_free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            g_free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
        tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
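
/* A note on the pointer tagging used in the list walks above (added
   for clarity): TB list pointers carry a tag in their low two bits.
   For the page lists, the tag n selects which of the pointed-to TB's
   two page slots (page_next[n]) continues the chain; for the jump
   lists, tag n selects jmp_next[n], and the special tag 2 marks the
   list head stored in jmp_first.  For example, a link stored as
   (TranslationBlock *)((uintptr_t)tb | 1) continues via slot 1 of tb.  */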

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
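
/* Worked example (illustrative, not in the original): set_bits(tab, 5, 6)
   marks bits 5..10.  Start and end fall in different bytes, so the first
   byte gets tab[0] |= 0xff << 5 (bits 5-7), the middle loop writes no
   full 0xff bytes, and the last byte gets tab[1] |= ~(0xff << 3), i.e.
   the low three bits of the second byte, which are global bits 8-10.  */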

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
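
/* Example of the code_gen_ptr alignment step above (illustrative,
   assuming CODE_GEN_ALIGN is 16 on the host): a code_gen_ptr of 0x1003
   after emitting 0x25 bytes becomes (0x1003 + 0x25 + 15) & ~15 =
   0x1030, so the next TB's generated code starts on a 16-byte
   boundary.  */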

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
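
/* Usage note (added for clarity): the binary search works because
   tbs[] is filled in code_gen_ptr order, so tc_ptr values increase
   monotonically.  A host return address taken anywhere inside a TB's
   generated code lands between tbs[m].tc_ptr and tbs[m + 1].tc_ptr,
   and the final "return &tbs[m_max]" selects the TB whose code
   contains it.  cpu_restore_state() above relies on exactly this.  */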

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (!tb) {
        cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
                  (void *)cpu->mem_io_pc);
    }
    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */