[qemu.git] / translate-all.c
/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
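
/* Worked example (illustrative numbers, not from the original source):
   with L1_MAP_ADDR_SPACE_BITS == 42 and TARGET_PAGE_BITS == 12 there are
   30 page-index bits to map.  30 % 10 == 0, so V_L1_BITS_REM is 0 (< 4)
   and the L1 table absorbs a whole extra level: V_L1_BITS == 10 and
   V_L1_SHIFT == 20, i.e. a 10-bit L1 table over two 10-bit lower levels. */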
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
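
/* Illustrative example (not part of the original source): with a 4 KiB
   host page, map_exec((void *)0x401234, 0x20) rounds the range outward
   to [0x401000, 0x402000) before calling mprotect, since protection can
   only be changed at host-page granularity. */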
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
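
/* Example (illustrative, hypothetical target): on a 4 KiB-page host
   running a target with 8 KiB pages, qemu_host_page_size is bumped up to
   TARGET_PAGE_SIZE and qemu_host_page_mask becomes ~0x1fff, so the
   host-page rounding done elsewhere always covers whole target pages. */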
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif
/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
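
/* Worked example (illustrative, not from the original source): for
   addr == 0x0ff00000 and size == 0x200000, addr ^ (addr + size) is
   0x0ff00000 ^ 0x10100000 == 0x1fe00000; masking with 0xf0000000 leaves
   0x10000000, non-zero, so the buffer straddles a 256MB boundary and
   must be moved or split below. */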
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            g_free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            g_free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
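
/* Note (editor's addition): a NULL return above is not an error.  The
   caller in this file, tb_gen_code(), responds by flushing every TB with
   tb_flush() and retrying; after a flush nb_tbs is 0 and code_gen_ptr is
   reset, so the retry cannot fail. */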
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
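
/* Note on the encoding walked above (editor's addition): the low two
   bits of each pointer in the circular jump list are a tag.  Values 0
   and 1 select which of the referring TB's two jmp_next[] slots
   continues the chain; the value 2, as stored in tb->jmp_first
   ("tb | 2"), marks the list head, which is why n1 == 2 redirects
   through tb1->jmp_first here and terminates the scan in
   tb_phys_invalidate() below. */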
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
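
/* Worked example (editor's addition, illustrative): set_bits(tab, 3, 7)
   covers bits [3, 10).  start and end fall in different bytes, so the
   else branch runs: tab[0] |= 0xf8 sets bits 3..7, there are no full
   middle bytes, and tab[1] |= 0x03 sets bits 8..9.  In the page bitmap
   built below, one bit stands for one byte of the guest page. */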
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
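
/* Example (editor's addition, illustrative): for a 4-byte write at page
   offset 0x10, offset >> 3 selects bitmap byte 2 and (offset & 7) == 0,
   so b holds the bits for bytes 0x10..0x17 and b & ((1 << 4) - 1) tests
   exactly the four written bytes.  The precondition that len <= 8 and
   start be len-aligned guarantees the tested bits never straddle a
   bitmap byte. */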
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
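
/* Editor's note: the binary search relies on tbs[] being sorted by
   tc_ptr, which holds because tb_alloc() hands out TBs in array order
   while code_gen_ptr only grows between flushes.  When tc_ptr falls
   inside a block rather than on its first host byte, the loop exits
   with m_max naming the last TB starting at or below tc_ptr, hence the
   final return. */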
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (!tb) {
        cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
                  (void *)cpu->mem_io_pc);
    }
    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
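
/* Editor's note: two hash pages are cleared because a TB may start on
   the page preceding 'addr' and run into the flushed page (a TB spans
   at most two pages, per page_addr[0]/page_addr[1]), so entries hashed
   under either page could reference stale code. */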
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
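
/* Usage note (editor's addition): page_dump() below is the in-file
   example consumer; it passes dump_region as 'fn' and the FILE* as the
   opaque 'priv' pointer, printing one "start end size prot" line per
   contiguous run of pages with identical protection flags. */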
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */