tcg: comment on which functions have to be called with tb_lock held
[qemu.git] / translate-all.c
1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #include "exec/exec-all.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/timer.h"
58 #include "exec/log.h"
60 /* #define DEBUG_TB_INVALIDATE */
61 /* #define DEBUG_TB_FLUSH */
62 /* #define DEBUG_LOCKING */
63 /* make various TB consistency checks */
64 /* #define DEBUG_TB_CHECK */
66 #if !defined(CONFIG_USER_ONLY)
67 /* TB consistency checks only implemented for usermode emulation. */
68 #undef DEBUG_TB_CHECK
69 #endif
71 /* Access to the various translation structures needs to be serialised via locks
72  * for consistency. This is automatic for SoftMMU-based system
73  * emulation due to its single-threaded nature. In user-mode emulation,
74  * access to the memory-related structures is protected with the
75  * mmap_lock.
77 #ifdef DEBUG_LOCKING
78 #define DEBUG_MEM_LOCKS 1
79 #else
80 #define DEBUG_MEM_LOCKS 0
81 #endif
83 #ifdef CONFIG_SOFTMMU
84 #define assert_memory_lock() do { /* nothing */ } while (0)
85 #else
86 #define assert_memory_lock() do { \
87 if (DEBUG_MEM_LOCKS) { \
88 g_assert(have_mmap_lock()); \
89 } \
90 } while (0)
91 #endif
93 #define SMC_BITMAP_USE_THRESHOLD 10
95 typedef struct PageDesc {
96 /* list of TBs intersecting this ram page */
97 TranslationBlock *first_tb;
98 #ifdef CONFIG_SOFTMMU
99 /* in order to optimize self-modifying code, we count the number
100 of write lookups to a given page; past a threshold we use a bitmap */
101 unsigned int code_write_count;
102 unsigned long *code_bitmap;
103 #else
104 unsigned long flags;
105 #endif
106 } PageDesc;
108 /* In system mode we want L1_MAP to be based on ram offsets,
109 while in user mode we want it to be based on virtual addresses. */
110 #if !defined(CONFIG_USER_ONLY)
111 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
112 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
113 #else
114 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
115 #endif
116 #else
117 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
118 #endif
120 /* Size of the L2 (and L3, etc) page tables. */
121 #define V_L2_BITS 10
122 #define V_L2_SIZE (1 << V_L2_BITS)
124 uintptr_t qemu_host_page_size;
125 intptr_t qemu_host_page_mask;
128 * L1 Mapping properties
130 static int v_l1_size;
131 static int v_l1_shift;
132 static int v_l2_levels;
134 /* The bottom level has pointers to PageDesc, and is indexed by
135 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
137 #define V_L1_MIN_BITS 4
138 #define V_L1_MAX_BITS (V_L2_BITS + 3)
139 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
141 static void *l1_map[V_L1_MAX_SIZE];
143 /* code generation context */
144 TCGContext tcg_ctx;
145 bool parallel_cpus;
147 /* translation block context */
148 #ifdef CONFIG_USER_ONLY
149 __thread int have_tb_lock;
150 #endif
152 static void page_table_config_init(void)
154 uint32_t v_l1_bits;
156 assert(TARGET_PAGE_BITS);
157 /* The bits remaining after N lower levels of page tables. */
158 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
159 if (v_l1_bits < V_L1_MIN_BITS) {
160 v_l1_bits += V_L2_BITS;
163 v_l1_size = 1 << v_l1_bits;
164 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
165 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
167 assert(v_l1_bits <= V_L1_MAX_BITS);
168 assert(v_l1_shift % V_L2_BITS == 0);
169 assert(v_l2_levels >= 0);
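/*
 * Illustrative sketch (added annotation, not from the original source): with
 * a hypothetical configuration of L1_MAP_ADDR_SPACE_BITS == 64,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10, the code above computes:
 *   v_l1_bits   = (64 - 12) % 10 = 2  -> below V_L1_MIN_BITS, so 2 + 10 = 12
 *   v_l1_size   = 1 << 12 = 4096 entries in l1_map
 *   v_l1_shift  = 64 - 12 - 12 = 40
 *   v_l2_levels = 40 / 10 - 1 = 3 intermediate levels
 * i.e. the 52-bit page index is split as 12 + 10 + 10 + 10 + 10 bits
 * (L1 table, three intermediate tables, final PageDesc array).
 */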
172 void tb_lock(void)
174 #ifdef CONFIG_USER_ONLY
175 assert(!have_tb_lock);
176 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
177 have_tb_lock++;
178 #endif
181 void tb_unlock(void)
183 #ifdef CONFIG_USER_ONLY
184 assert(have_tb_lock);
185 have_tb_lock--;
186 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
187 #endif
190 void tb_lock_reset(void)
192 #ifdef CONFIG_USER_ONLY
193 if (have_tb_lock) {
194 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
195 have_tb_lock = 0;
197 #endif
200 #ifdef DEBUG_LOCKING
201 #define DEBUG_TB_LOCKS 1
202 #else
203 #define DEBUG_TB_LOCKS 0
204 #endif
206 #ifdef CONFIG_SOFTMMU
207 #define assert_tb_lock() do { /* nothing */ } while (0)
208 #else
209 #define assert_tb_lock() do { \
210 if (DEBUG_TB_LOCKS) { \
211 g_assert(have_tb_lock); \
213 } while (0)
214 #endif
217 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
219 void cpu_gen_init(void)
221 tcg_context_init(&tcg_ctx);
224 /* Encode VAL as a signed leb128 sequence at P.
225 Return P incremented past the encoded value. */
226 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
228 int more, byte;
230 do {
231 byte = val & 0x7f;
232 val >>= 7;
233 more = !((val == 0 && (byte & 0x40) == 0)
234 || (val == -1 && (byte & 0x40) != 0));
235 if (more) {
236 byte |= 0x80;
238 *p++ = byte;
239 } while (more);
241 return p;
244 /* Decode a signed leb128 sequence at *PP; increment *PP past the
245 decoded value. Return the decoded value. */
246 static target_long decode_sleb128(uint8_t **pp)
248 uint8_t *p = *pp;
249 target_long val = 0;
250 int byte, shift = 0;
252 do {
253 byte = *p++;
254 val |= (target_ulong)(byte & 0x7f) << shift;
255 shift += 7;
256 } while (byte & 0x80);
257 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
258 val |= -(target_ulong)1 << shift;
261 *pp = p;
262 return val;
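/*
 * Worked example (added annotation, not part of the original file): the
 * encoder above emits 7 data bits per byte, LSB first, with bit 7 as a
 * continuation flag and bit 6 of the final byte acting as the sign bit.
 *   encode_sleb128(p, 300) writes { 0xAC, 0x02 }   (0x2C | 0x80, then 0x02)
 *   encode_sleb128(p, -2)  writes { 0x7E }         (sign bit 0x40 is set)
 * decode_sleb128() reverses this, sign-extending when bit 6 of the final
 * byte is set and fewer than TARGET_LONG_BITS bits have been shifted in.
 */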
265 /* Encode the data collected about the instructions while compiling TB.
266 Place the data at BLOCK, and return the number of bytes consumed.
268 The logical table consists of TARGET_INSN_START_WORDS target_ulongs,
269 which come from the target's insn_start data, followed by a uintptr_t
270 which comes from the host pc of the end of the code implementing the insn.
272 Each line of the table is encoded as sleb128 deltas from the previous
273 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
274 That is, the first column is seeded with the guest pc, the last column
275 with the host pc, and the middle columns with zeros. */
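/*
 * Example of the resulting layout (illustrative, assuming
 * TARGET_INSN_START_WORDS == 1): for a TB of two guest instructions at
 * pc and pc + 4 whose generated host code ends at offsets 40 and 72 from
 * tb->tc_ptr, encode_search() below emits the sleb128 values
 *     row 0:  0 (pc     - pc),  40 (40 - 0)
 *     row 1:  4 (pc + 4 - pc),  32 (72 - 40)
 * cpu_restore_state_from_tb() then walks these rows, accumulating the
 * deltas until the reconstructed host pc passes the searched pc.
 */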
277 static int encode_search(TranslationBlock *tb, uint8_t *block)
279 uint8_t *highwater = tcg_ctx.code_gen_highwater;
280 uint8_t *p = block;
281 int i, j, n;
283 tb->tc_search = block;
285 for (i = 0, n = tb->icount; i < n; ++i) {
286 target_ulong prev;
288 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
289 if (i == 0) {
290 prev = (j == 0 ? tb->pc : 0);
291 } else {
292 prev = tcg_ctx.gen_insn_data[i - 1][j];
294 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
296 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
297 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
299 /* Test for (pending) buffer overflow. The assumption is that any
300 one row beginning below the high water mark cannot overrun
301 the buffer completely. Thus we can test for overflow after
302 encoding a row without having to check during encoding. */
303 if (unlikely(p > highwater)) {
304 return -1;
308 return p - block;
311 /* The cpu state corresponding to 'searched_pc' is restored.
312 * Called with tb_lock held.
314 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
315 uintptr_t searched_pc)
317 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
318 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
319 CPUArchState *env = cpu->env_ptr;
320 uint8_t *p = tb->tc_search;
321 int i, j, num_insns = tb->icount;
322 #ifdef CONFIG_PROFILER
323 int64_t ti = profile_getclock();
324 #endif
326 searched_pc -= GETPC_ADJ;
328 if (searched_pc < host_pc) {
329 return -1;
332 /* Reconstruct the stored insn data while looking for the point at
333 which the end of the insn exceeds the searched_pc. */
334 for (i = 0; i < num_insns; ++i) {
335 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
336 data[j] += decode_sleb128(&p);
338 host_pc += decode_sleb128(&p);
339 if (host_pc > searched_pc) {
340 goto found;
343 return -1;
345 found:
346 if (tb->cflags & CF_USE_ICOUNT) {
347 assert(use_icount);
348 /* Reset the cycle counter to the start of the block. */
349 cpu->icount_decr.u16.low += num_insns;
350 /* Clear the IO flag. */
351 cpu->can_do_io = 0;
353 cpu->icount_decr.u16.low -= i;
354 restore_state_to_opc(env, tb, data);
356 #ifdef CONFIG_PROFILER
357 tcg_ctx.restore_time += profile_getclock() - ti;
358 tcg_ctx.restore_count++;
359 #endif
360 return 0;
363 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
365 TranslationBlock *tb;
367 tb = tb_find_pc(retaddr);
368 if (tb) {
369 cpu_restore_state_from_tb(cpu, tb, retaddr);
370 if (tb->cflags & CF_NOCACHE) {
371 /* one-shot translation, invalidate it immediately */
372 tb_phys_invalidate(tb, -1);
373 tb_free(tb);
375 return true;
377 return false;
380 void page_size_init(void)
382 /* NOTE: we can always assume that qemu_host_page_size >=
383 TARGET_PAGE_SIZE */
384 qemu_real_host_page_size = getpagesize();
385 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
386 if (qemu_host_page_size == 0) {
387 qemu_host_page_size = qemu_real_host_page_size;
389 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
390 qemu_host_page_size = TARGET_PAGE_SIZE;
392 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
395 static void page_init(void)
397 page_size_init();
398 page_table_config_init();
400 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
402 #ifdef HAVE_KINFO_GETVMMAP
403 struct kinfo_vmentry *freep;
404 int i, cnt;
406 freep = kinfo_getvmmap(getpid(), &cnt);
407 if (freep) {
408 mmap_lock();
409 for (i = 0; i < cnt; i++) {
410 unsigned long startaddr, endaddr;
412 startaddr = freep[i].kve_start;
413 endaddr = freep[i].kve_end;
414 if (h2g_valid(startaddr)) {
415 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
417 if (h2g_valid(endaddr)) {
418 endaddr = h2g(endaddr);
419 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
420 } else {
421 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
422 endaddr = ~0ul;
423 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
424 #endif
428 free(freep);
429 mmap_unlock();
431 #else
432 FILE *f;
434 last_brk = (unsigned long)sbrk(0);
436 f = fopen("/compat/linux/proc/self/maps", "r");
437 if (f) {
438 mmap_lock();
440 do {
441 unsigned long startaddr, endaddr;
442 int n;
444 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
446 if (n == 2 && h2g_valid(startaddr)) {
447 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
449 if (h2g_valid(endaddr)) {
450 endaddr = h2g(endaddr);
451 } else {
452 endaddr = ~0ul;
454 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
456 } while (!feof(f));
458 fclose(f);
459 mmap_unlock();
461 #endif
463 #endif
466 /* If alloc=1:
467 * Called with tb_lock held for system emulation.
468 * Called with mmap_lock held for user-mode emulation.
470 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
472 PageDesc *pd;
473 void **lp;
474 int i;
476 /* Level 1. Always allocated. */
477 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
479 /* Level 2..N-1. */
480 for (i = v_l2_levels; i > 0; i--) {
481 void **p = atomic_rcu_read(lp);
483 if (p == NULL) {
484 if (!alloc) {
485 return NULL;
487 p = g_new0(void *, V_L2_SIZE);
488 atomic_rcu_set(lp, p);
491 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
494 pd = atomic_rcu_read(lp);
495 if (pd == NULL) {
496 if (!alloc) {
497 return NULL;
499 pd = g_new0(PageDesc, V_L2_SIZE);
500 atomic_rcu_set(lp, pd);
503 return pd + (index & (V_L2_SIZE - 1));
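/*
 * Added note: page_find_alloc() above walks the radix tree configured in
 * page_table_config_init().  With the hypothetical v_l1_shift == 40,
 * v_l1_bits == 12, v_l2_levels == 3 layout, a page index is consumed as:
 * bits [40,52) select the l1_map slot, bits [30,40), [20,30) and [10,20)
 * select the three intermediate tables, and bits [0,10) index the final
 * PageDesc array that is returned.
 */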
506 static inline PageDesc *page_find(tb_page_addr_t index)
508 return page_find_alloc(index, 0);
511 #if defined(CONFIG_USER_ONLY)
512 /* Currently it is not recommended to allocate big chunks of data in
513 user mode. This will change when a dedicated libc is used. */
514 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
515 region in which the guest needs to run. Revisit this. */
516 #define USE_STATIC_CODE_GEN_BUFFER
517 #endif
519 /* Minimum size of the code gen buffer. This number is arbitrarily chosen,
520 but not so small that we can't have a fair number of TBs live. */
521 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
523 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
524 indicated, this is constrained by the range of direct branches on the
525 host cpu, as used by the TCG implementation of goto_tb. */
526 #if defined(__x86_64__)
527 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
528 #elif defined(__sparc__)
529 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
530 #elif defined(__powerpc64__)
531 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
532 #elif defined(__powerpc__)
533 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
534 #elif defined(__aarch64__)
535 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
536 #elif defined(__arm__)
537 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
538 #elif defined(__s390x__)
539 /* We have a +- 4GB range on the branches; leave some slop. */
540 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
541 #elif defined(__mips__)
542 /* We have a 256MB branch region, but leave room to make sure the
543 main executable is also within that region. */
544 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
545 #else
546 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
547 #endif
549 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
551 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
552 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
553 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
555 static inline size_t size_code_gen_buffer(size_t tb_size)
557 /* Size the buffer. */
558 if (tb_size == 0) {
559 #ifdef USE_STATIC_CODE_GEN_BUFFER
560 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
561 #else
562 /* ??? Needs adjustments. */
563 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
564 static buffer, we could size this on RESERVED_VA, on the text
565 segment size of the executable, or continue to use the default. */
566 tb_size = (unsigned long)(ram_size / 4);
567 #endif
569 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
570 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
572 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
573 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
575 return tb_size;
578 #ifdef __mips__
579 /* In order to use J and JAL within the code_gen_buffer, we require
580 that the buffer not cross a 256MB boundary. */
581 static inline bool cross_256mb(void *addr, size_t size)
583 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
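/*
 * Example (annotation added for clarity): the XOR above is non-zero in the
 * bits at or above bit 28 exactly when the start and end of the buffer fall
 * in different 256MB-aligned regions.  E.g. addr = 0x2fe00000 and
 * size = 0x300000 give addr + size = 0x30100000;
 * (0x2fe00000 ^ 0x30100000) & ~0x0fffffff == 0x10000000, so the buffer
 * crosses.  split_cross_256mb() below would then keep the 2MB portion
 * below 0x30000000 and discard the 1MB above it.
 */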
586 /* We weren't able to allocate a buffer without crossing that boundary,
587 so make do with the larger portion of the buffer that doesn't cross.
588 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
589 static inline void *split_cross_256mb(void *buf1, size_t size1)
591 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
592 size_t size2 = buf1 + size1 - buf2;
594 size1 = buf2 - buf1;
595 if (size1 < size2) {
596 size1 = size2;
597 buf1 = buf2;
600 tcg_ctx.code_gen_buffer_size = size1;
601 return buf1;
603 #endif
605 #ifdef USE_STATIC_CODE_GEN_BUFFER
606 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
607 __attribute__((aligned(CODE_GEN_ALIGN)));
609 # ifdef _WIN32
610 static inline void do_protect(void *addr, long size, int prot)
612 DWORD old_protect;
613 VirtualProtect(addr, size, prot, &old_protect);
616 static inline void map_exec(void *addr, long size)
618 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
621 static inline void map_none(void *addr, long size)
623 do_protect(addr, size, PAGE_NOACCESS);
625 # else
626 static inline void do_protect(void *addr, long size, int prot)
628 uintptr_t start, end;
630 start = (uintptr_t)addr;
631 start &= qemu_real_host_page_mask;
633 end = (uintptr_t)addr + size;
634 end = ROUND_UP(end, qemu_real_host_page_size);
636 mprotect((void *)start, end - start, prot);
639 static inline void map_exec(void *addr, long size)
641 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
644 static inline void map_none(void *addr, long size)
646 do_protect(addr, size, PROT_NONE);
648 # endif /* WIN32 */
650 static inline void *alloc_code_gen_buffer(void)
652 void *buf = static_code_gen_buffer;
653 size_t full_size, size;
655 /* The size of the buffer, rounded down to end on a page boundary. */
656 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
657 & qemu_real_host_page_mask) - (uintptr_t)buf;
659 /* Reserve a guard page. */
660 size = full_size - qemu_real_host_page_size;
662 /* Honor a command-line option limiting the size of the buffer. */
663 if (size > tcg_ctx.code_gen_buffer_size) {
664 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
665 & qemu_real_host_page_mask) - (uintptr_t)buf;
667 tcg_ctx.code_gen_buffer_size = size;
669 #ifdef __mips__
670 if (cross_256mb(buf, size)) {
671 buf = split_cross_256mb(buf, size);
672 size = tcg_ctx.code_gen_buffer_size;
674 #endif
676 map_exec(buf, size);
677 map_none(buf + size, qemu_real_host_page_size);
678 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
680 return buf;
682 #elif defined(_WIN32)
683 static inline void *alloc_code_gen_buffer(void)
685 size_t size = tcg_ctx.code_gen_buffer_size;
686 void *buf1, *buf2;
688 /* Perform the allocation in two steps, so that the guard page
689 is reserved but uncommitted. */
690 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
691 MEM_RESERVE, PAGE_NOACCESS);
692 if (buf1 != NULL) {
693 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
694 assert(buf1 == buf2);
697 return buf1;
699 #else
700 static inline void *alloc_code_gen_buffer(void)
702 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
703 uintptr_t start = 0;
704 size_t size = tcg_ctx.code_gen_buffer_size;
705 void *buf;
707 /* Constrain the position of the buffer based on the host cpu.
708 Note that these addresses are chosen in concert with the
709 addresses assigned in the relevant linker script file. */
710 # if defined(__PIE__) || defined(__PIC__)
711 /* Don't bother setting a preferred location if we're building
712 a position-independent executable. We're more likely to get
713 an address near the main executable if we let the kernel
714 choose the address. */
715 # elif defined(__x86_64__) && defined(MAP_32BIT)
716 /* Force the memory down into low memory with the executable.
717 Leave the choice of exact location with the kernel. */
718 flags |= MAP_32BIT;
719 /* Cannot expect to map more than 800MB in low memory. */
720 if (size > 800u * 1024 * 1024) {
721 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
723 # elif defined(__sparc__)
724 start = 0x40000000ul;
725 # elif defined(__s390x__)
726 start = 0x90000000ul;
727 # elif defined(__mips__)
728 # if _MIPS_SIM == _ABI64
729 start = 0x128000000ul;
730 # else
731 start = 0x08000000ul;
732 # endif
733 # endif
735 buf = mmap((void *)start, size + qemu_real_host_page_size,
736 PROT_NONE, flags, -1, 0);
737 if (buf == MAP_FAILED) {
738 return NULL;
741 #ifdef __mips__
742 if (cross_256mb(buf, size)) {
743 /* Try again, with the original still mapped, to avoid re-acquiring
744 that 256mb crossing. This time don't specify an address. */
745 size_t size2;
746 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
747 PROT_NONE, flags, -1, 0);
748 switch (buf2 != MAP_FAILED) {
749 case 1:
750 if (!cross_256mb(buf2, size)) {
751 /* Success! Use the new buffer. */
752 munmap(buf, size + qemu_real_host_page_size);
753 break;
755 /* Failure. Work with what we had. */
756 munmap(buf2, size + qemu_real_host_page_size);
757 /* fallthru */
758 default:
759 /* Split the original buffer. Free the smaller half. */
760 buf2 = split_cross_256mb(buf, size);
761 size2 = tcg_ctx.code_gen_buffer_size;
762 if (buf == buf2) {
763 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
764 } else {
765 munmap(buf, size - size2);
767 size = size2;
768 break;
770 buf = buf2;
772 #endif
774 /* Make the final buffer accessible. The guard page at the end
775 will remain inaccessible with PROT_NONE. */
776 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
778 /* Request large pages for the buffer. */
779 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
781 return buf;
783 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
785 static inline void code_gen_alloc(size_t tb_size)
787 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
788 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
789 if (tcg_ctx.code_gen_buffer == NULL) {
790 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
791 exit(1);
794 /* Estimate a good size for the number of TBs we can support. We
795 still haven't deducted the prologue from the buffer size here,
796 but that's minimal and won't affect the estimate much. */
797 tcg_ctx.code_gen_max_blocks
798 = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
799 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
801 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
804 static void tb_htable_init(void)
806 unsigned int mode = QHT_MODE_AUTO_RESIZE;
808 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
811 /* Must be called before using the QEMU cpus. 'tb_size' is the size
812 (in bytes) allocated to the translation buffer. Zero means default
813 size. */
814 void tcg_exec_init(unsigned long tb_size)
816 cpu_gen_init();
817 page_init();
818 tb_htable_init();
819 code_gen_alloc(tb_size);
820 #if defined(CONFIG_SOFTMMU)
821 /* There's no guest base to take into account, so go ahead and
822 initialize the prologue now. */
823 tcg_prologue_init(&tcg_ctx);
824 #endif
827 bool tcg_enabled(void)
829 return tcg_ctx.code_gen_buffer != NULL;
833 * Allocate a new translation block. Flush the translation buffer if
834 * too many translation blocks or too much generated code.
836 * Called with tb_lock held.
838 static TranslationBlock *tb_alloc(target_ulong pc)
840 TranslationBlock *tb;
842 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
843 return NULL;
845 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
846 tb->pc = pc;
847 tb->cflags = 0;
848 tb->invalid = false;
849 return tb;
852 /* Called with tb_lock held. */
853 void tb_free(TranslationBlock *tb)
855 /* In practice this is mostly used for single-use temporary TBs.
856 Ignore the hard cases and just back up if this TB happens to
857 be the last one generated. */
858 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
859 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
860 tcg_ctx.code_gen_ptr = tb->tc_ptr;
861 tcg_ctx.tb_ctx.nb_tbs--;
865 static inline void invalidate_page_bitmap(PageDesc *p)
867 #ifdef CONFIG_SOFTMMU
868 g_free(p->code_bitmap);
869 p->code_bitmap = NULL;
870 p->code_write_count = 0;
871 #endif
874 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
875 static void page_flush_tb_1(int level, void **lp)
877 int i;
879 if (*lp == NULL) {
880 return;
882 if (level == 0) {
883 PageDesc *pd = *lp;
885 for (i = 0; i < V_L2_SIZE; ++i) {
886 pd[i].first_tb = NULL;
887 invalidate_page_bitmap(pd + i);
889 } else {
890 void **pp = *lp;
892 for (i = 0; i < V_L2_SIZE; ++i) {
893 page_flush_tb_1(level - 1, pp + i);
898 static void page_flush_tb(void)
900 int i, l1_sz = v_l1_size;
902 for (i = 0; i < l1_sz; i++) {
903 page_flush_tb_1(v_l2_levels, l1_map + i);
907 /* flush all the translation blocks */
908 static void do_tb_flush(CPUState *cpu, void *data)
910 unsigned tb_flush_req = (unsigned) (uintptr_t) data;
912 tb_lock();
914 /* If it's already been done on request of another CPU,
915 * just retry.
917 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
918 goto done;
921 #if defined(DEBUG_TB_FLUSH)
922 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
923 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
924 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
925 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
926 tcg_ctx.tb_ctx.nb_tbs : 0);
927 #endif
928 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
929 > tcg_ctx.code_gen_buffer_size) {
930 cpu_abort(cpu, "Internal error: code buffer overflow\n");
933 CPU_FOREACH(cpu) {
934 int i;
936 for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
937 atomic_set(&cpu->tb_jmp_cache[i], NULL);
941 tcg_ctx.tb_ctx.nb_tbs = 0;
942 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
943 page_flush_tb();
945 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
946 /* XXX: flush processor icache at this point if cache flush is
947 expensive */
948 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
949 tcg_ctx.tb_ctx.tb_flush_count + 1);
951 done:
952 tb_unlock();
955 void tb_flush(CPUState *cpu)
957 if (tcg_enabled()) {
958 uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
959 async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
963 #ifdef DEBUG_TB_CHECK
965 static void
966 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
968 TranslationBlock *tb = p;
969 target_ulong addr = *(target_ulong *)userp;
971 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
972 printf("ERROR invalidate: address=" TARGET_FMT_lx
973 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
977 /* verify that all the pages have correct rights for code
979 * Called with tb_lock held.
981 static void tb_invalidate_check(target_ulong address)
983 address &= TARGET_PAGE_MASK;
984 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
987 static void
988 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
990 TranslationBlock *tb = p;
991 int flags1, flags2;
993 flags1 = page_get_flags(tb->pc);
994 flags2 = page_get_flags(tb->pc + tb->size - 1);
995 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
996 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
997 (long)tb->pc, tb->size, flags1, flags2);
1001 /* verify that all the pages have correct rights for code */
1002 static void tb_page_check(void)
1004 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
1007 #endif
1009 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
1011 TranslationBlock *tb1;
1012 unsigned int n1;
1014 for (;;) {
1015 tb1 = *ptb;
1016 n1 = (uintptr_t)tb1 & 3;
1017 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1018 if (tb1 == tb) {
1019 *ptb = tb1->page_next[n1];
1020 break;
1022 ptb = &tb1->page_next[n1];
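/*
 * Added note on the jump-list encoding used by the functions below: the
 * values stored in jmp_list_next[] and jmp_list_first are TranslationBlock
 * pointers with a tag in the low two bits.  Tags 0 and 1 name which
 * outgoing jump slot of the pointed-to TB is part of the list; tag 2 means
 * the pointer refers to the destination TB itself, whose jmp_list_first
 * (initialised to (uintptr_t)tb | 2 in tb_gen_code()) closes the circular
 * list and marks its end during unlinking.
 */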
1026 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1027 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1029 TranslationBlock *tb1;
1030 uintptr_t *ptb, ntb;
1031 unsigned int n1;
1033 ptb = &tb->jmp_list_next[n];
1034 if (*ptb) {
1035 /* find tb(n) in circular list */
1036 for (;;) {
1037 ntb = *ptb;
1038 n1 = ntb & 3;
1039 tb1 = (TranslationBlock *)(ntb & ~3);
1040 if (n1 == n && tb1 == tb) {
1041 break;
1043 if (n1 == 2) {
1044 ptb = &tb1->jmp_list_first;
1045 } else {
1046 ptb = &tb1->jmp_list_next[n1];
1049 /* now we can remove tb(n) from the list */
1050 *ptb = tb->jmp_list_next[n];
1052 tb->jmp_list_next[n] = (uintptr_t)NULL;
1056 /* reset the jump entry 'n' of a TB so that it is not chained to
1057 another TB */
1058 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1060 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1061 tb_set_jmp_target(tb, n, addr);
1064 /* remove any jumps to the TB */
1065 static inline void tb_jmp_unlink(TranslationBlock *tb)
1067 TranslationBlock *tb1;
1068 uintptr_t *ptb, ntb;
1069 unsigned int n1;
1071 ptb = &tb->jmp_list_first;
1072 for (;;) {
1073 ntb = *ptb;
1074 n1 = ntb & 3;
1075 tb1 = (TranslationBlock *)(ntb & ~3);
1076 if (n1 == 2) {
1077 break;
1079 tb_reset_jump(tb1, n1);
1080 *ptb = tb1->jmp_list_next[n1];
1081 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1085 /* invalidate one TB
1087 * Called with tb_lock held.
1089 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1091 CPUState *cpu;
1092 PageDesc *p;
1093 uint32_t h;
1094 tb_page_addr_t phys_pc;
1096 atomic_set(&tb->invalid, true);
1098 /* remove the TB from the hash list */
1099 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1100 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1101 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1103 /* remove the TB from the page list */
1104 if (tb->page_addr[0] != page_addr) {
1105 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1106 tb_page_remove(&p->first_tb, tb);
1107 invalidate_page_bitmap(p);
1109 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1110 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1111 tb_page_remove(&p->first_tb, tb);
1112 invalidate_page_bitmap(p);
1115 /* remove the TB from the hash list */
1116 h = tb_jmp_cache_hash_func(tb->pc);
1117 CPU_FOREACH(cpu) {
1118 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1119 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1123 /* remove this TB from the two jump lists */
1124 tb_remove_from_jmp_list(tb, 0);
1125 tb_remove_from_jmp_list(tb, 1);
1127 /* remove any remaining jumps to this TB */
1128 tb_jmp_unlink(tb);
1130 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1133 #ifdef CONFIG_SOFTMMU
1134 static void build_page_bitmap(PageDesc *p)
1136 int n, tb_start, tb_end;
1137 TranslationBlock *tb;
1139 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1141 tb = p->first_tb;
1142 while (tb != NULL) {
1143 n = (uintptr_t)tb & 3;
1144 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1145 /* NOTE: this is subtle as a TB may span two physical pages */
1146 if (n == 0) {
1147 /* NOTE: tb_end may be after the end of the page, but
1148 it is not a problem */
1149 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1150 tb_end = tb_start + tb->size;
1151 if (tb_end > TARGET_PAGE_SIZE) {
1152 tb_end = TARGET_PAGE_SIZE;
1154 } else {
1155 tb_start = 0;
1156 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1158 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1159 tb = tb->page_next[n];
1162 #endif
1164 /* add the tb in the target page and protect it if necessary
1166 * Called with mmap_lock held for user-mode emulation.
1168 static inline void tb_alloc_page(TranslationBlock *tb,
1169 unsigned int n, tb_page_addr_t page_addr)
1171 PageDesc *p;
1172 #ifndef CONFIG_USER_ONLY
1173 bool page_already_protected;
1174 #endif
1176 tb->page_addr[n] = page_addr;
1177 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1178 tb->page_next[n] = p->first_tb;
1179 #ifndef CONFIG_USER_ONLY
1180 page_already_protected = p->first_tb != NULL;
1181 #endif
1182 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1183 invalidate_page_bitmap(p);
1185 #if defined(CONFIG_USER_ONLY)
1186 if (p->flags & PAGE_WRITE) {
1187 target_ulong addr;
1188 PageDesc *p2;
1189 int prot;
1191 /* force the host page to be non-writable (writes will incur a
1192 page fault + mprotect overhead) */
1193 page_addr &= qemu_host_page_mask;
1194 prot = 0;
1195 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1196 addr += TARGET_PAGE_SIZE) {
1198 p2 = page_find(addr >> TARGET_PAGE_BITS);
1199 if (!p2) {
1200 continue;
1202 prot |= p2->flags;
1203 p2->flags &= ~PAGE_WRITE;
1205 mprotect(g2h(page_addr), qemu_host_page_size,
1206 (prot & PAGE_BITS) & ~PAGE_WRITE);
1207 #ifdef DEBUG_TB_INVALIDATE
1208 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1209 page_addr);
1210 #endif
1212 #else
1213 /* if some code is already present, then the pages are already
1214 protected. So we only need to protect the page when the first TB
1215 in it is allocated */
1216 if (!page_already_protected) {
1217 tlb_protect_code(page_addr);
1219 #endif
1222 /* add a new TB and link it to the physical page tables. phys_page2 is
1223 * (-1) to indicate that only one page contains the TB.
1225 * Called with mmap_lock held for user-mode emulation.
1227 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1228 tb_page_addr_t phys_page2)
1230 uint32_t h;
1232 /* add in the page list */
1233 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1234 if (phys_page2 != -1) {
1235 tb_alloc_page(tb, 1, phys_page2);
1236 } else {
1237 tb->page_addr[1] = -1;
1240 /* add in the hash table */
1241 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1242 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1244 #ifdef DEBUG_TB_CHECK
1245 tb_page_check();
1246 #endif
1249 /* Called with mmap_lock held for user mode emulation. */
1250 TranslationBlock *tb_gen_code(CPUState *cpu,
1251 target_ulong pc, target_ulong cs_base,
1252 uint32_t flags, int cflags)
1254 CPUArchState *env = cpu->env_ptr;
1255 TranslationBlock *tb;
1256 tb_page_addr_t phys_pc, phys_page2;
1257 target_ulong virt_page2;
1258 tcg_insn_unit *gen_code_buf;
1259 int gen_code_size, search_size;
1260 #ifdef CONFIG_PROFILER
1261 int64_t ti;
1262 #endif
1264 phys_pc = get_page_addr_code(env, pc);
1265 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1266 cflags |= CF_USE_ICOUNT;
1269 tb = tb_alloc(pc);
1270 if (unlikely(!tb)) {
1271 buffer_overflow:
1272 /* flush must be done */
1273 tb_flush(cpu);
1274 mmap_unlock();
1275 cpu_loop_exit(cpu);
1278 gen_code_buf = tcg_ctx.code_gen_ptr;
1279 tb->tc_ptr = gen_code_buf;
1280 tb->cs_base = cs_base;
1281 tb->flags = flags;
1282 tb->cflags = cflags;
1284 #ifdef CONFIG_PROFILER
1285 tcg_ctx.tb_count1++; /* includes aborted translations because of
1286 exceptions */
1287 ti = profile_getclock();
1288 #endif
1290 tcg_func_start(&tcg_ctx);
1292 tcg_ctx.cpu = ENV_GET_CPU(env);
1293 gen_intermediate_code(env, tb);
1294 tcg_ctx.cpu = NULL;
1296 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1298 /* generate machine code */
1299 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1300 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1301 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1302 #ifdef USE_DIRECT_JUMP
1303 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1304 tcg_ctx.tb_jmp_target_addr = NULL;
1305 #else
1306 tcg_ctx.tb_jmp_insn_offset = NULL;
1307 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1308 #endif
1310 #ifdef CONFIG_PROFILER
1311 tcg_ctx.tb_count++;
1312 tcg_ctx.interm_time += profile_getclock() - ti;
1313 tcg_ctx.code_time -= profile_getclock();
1314 #endif
1316 /* ??? Overflow could be handled better here. In particular, we
1317 don't need to re-do gen_intermediate_code, nor should we re-do
1318 the tcg optimization currently hidden inside tcg_gen_code. All
1319 that should be required is to flush the TBs, allocate a new TB,
1320 re-initialize it per above, and re-do the actual code generation. */
1321 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1322 if (unlikely(gen_code_size < 0)) {
1323 goto buffer_overflow;
1325 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1326 if (unlikely(search_size < 0)) {
1327 goto buffer_overflow;
1330 #ifdef CONFIG_PROFILER
1331 tcg_ctx.code_time += profile_getclock();
1332 tcg_ctx.code_in_len += tb->size;
1333 tcg_ctx.code_out_len += gen_code_size;
1334 tcg_ctx.search_out_len += search_size;
1335 #endif
1337 #ifdef DEBUG_DISAS
1338 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1339 qemu_log_in_addr_range(tb->pc)) {
1340 qemu_log("OUT: [size=%d]\n", gen_code_size);
1341 log_disas(tb->tc_ptr, gen_code_size);
1342 qemu_log("\n");
1343 qemu_log_flush();
1345 #endif
1347 tcg_ctx.code_gen_ptr = (void *)
1348 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1349 CODE_GEN_ALIGN);
1351 /* init jump list */
1352 assert(((uintptr_t)tb & 3) == 0);
1353 tb->jmp_list_first = (uintptr_t)tb | 2;
1354 tb->jmp_list_next[0] = (uintptr_t)NULL;
1355 tb->jmp_list_next[1] = (uintptr_t)NULL;
1357 /* init original jump addresses which have been set during tcg_gen_code() */
1358 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1359 tb_reset_jump(tb, 0);
1361 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1362 tb_reset_jump(tb, 1);
1365 /* check next page if needed */
1366 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1367 phys_page2 = -1;
1368 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1369 phys_page2 = get_page_addr_code(env, virt_page2);
1371 /* As long as consistency of the TB stuff is provided by tb_lock in user
1372 * mode and is implicit in single-threaded softmmu emulation, no explicit
1373 * memory barrier is required before tb_link_page() makes the TB visible
1374 * through the physical hash table and physical page list.
1376 tb_link_page(tb, phys_pc, phys_page2);
1377 return tb;
1381 * Invalidate all TBs which intersect with the target physical address range
1382 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1383 * 'is_cpu_write_access' should be true if called from a real cpu write
1384 * access: the virtual CPU will exit the current TB if code is modified inside
1385 * this TB.
1387 * Called with mmap_lock held for user-mode emulation
1389 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1391 while (start < end) {
1392 tb_invalidate_phys_page_range(start, end, 0);
1393 start &= TARGET_PAGE_MASK;
1394 start += TARGET_PAGE_SIZE;
1399 * Invalidate all TBs which intersect with the target physical address range
1400 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1401 * 'is_cpu_write_access' should be true if called from a real cpu write
1402 * access: the virtual CPU will exit the current TB if code is modified inside
1403 * this TB.
1405 * Called with mmap_lock held for user-mode emulation
1407 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1408 int is_cpu_write_access)
1410 TranslationBlock *tb, *tb_next;
1411 #if defined(TARGET_HAS_PRECISE_SMC)
1412 CPUState *cpu = current_cpu;
1413 CPUArchState *env = NULL;
1414 #endif
1415 tb_page_addr_t tb_start, tb_end;
1416 PageDesc *p;
1417 int n;
1418 #ifdef TARGET_HAS_PRECISE_SMC
1419 int current_tb_not_found = is_cpu_write_access;
1420 TranslationBlock *current_tb = NULL;
1421 int current_tb_modified = 0;
1422 target_ulong current_pc = 0;
1423 target_ulong current_cs_base = 0;
1424 uint32_t current_flags = 0;
1425 #endif /* TARGET_HAS_PRECISE_SMC */
1427 p = page_find(start >> TARGET_PAGE_BITS);
1428 if (!p) {
1429 return;
1431 #if defined(TARGET_HAS_PRECISE_SMC)
1432 if (cpu != NULL) {
1433 env = cpu->env_ptr;
1435 #endif
1437 /* we remove all the TBs in the range [start, end[ */
1438 /* XXX: see if in some cases it could be faster to invalidate all
1439 the code */
1440 tb = p->first_tb;
1441 while (tb != NULL) {
1442 n = (uintptr_t)tb & 3;
1443 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1444 tb_next = tb->page_next[n];
1445 /* NOTE: this is subtle as a TB may span two physical pages */
1446 if (n == 0) {
1447 /* NOTE: tb_end may be after the end of the page, but
1448 it is not a problem */
1449 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1450 tb_end = tb_start + tb->size;
1451 } else {
1452 tb_start = tb->page_addr[1];
1453 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1455 if (!(tb_end <= start || tb_start >= end)) {
1456 #ifdef TARGET_HAS_PRECISE_SMC
1457 if (current_tb_not_found) {
1458 current_tb_not_found = 0;
1459 current_tb = NULL;
1460 if (cpu->mem_io_pc) {
1461 /* now we have a real cpu fault */
1462 current_tb = tb_find_pc(cpu->mem_io_pc);
1465 if (current_tb == tb &&
1466 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1467 /* If we are modifying the current TB, we must stop
1468 its execution. We could be more precise by checking
1469 that the modification is after the current PC, but it
1470 would require a specialized function to partially
1471 restore the CPU state */
1473 current_tb_modified = 1;
1474 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1475 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1476 &current_flags);
1478 #endif /* TARGET_HAS_PRECISE_SMC */
1479 tb_phys_invalidate(tb, -1);
1481 tb = tb_next;
1483 #if !defined(CONFIG_USER_ONLY)
1484 /* if no code remaining, no need to continue to use slow writes */
1485 if (!p->first_tb) {
1486 invalidate_page_bitmap(p);
1487 tlb_unprotect_code(start);
1489 #endif
1490 #ifdef TARGET_HAS_PRECISE_SMC
1491 if (current_tb_modified) {
1492 /* we generate a block containing just the instruction
1493 modifying the memory. It will ensure that it cannot modify
1494 itself */
1495 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1496 cpu_loop_exit_noexc(cpu);
1498 #endif
1501 #ifdef CONFIG_SOFTMMU
1502 /* len must be <= 8 and start must be a multiple of len */
1503 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1505 PageDesc *p;
1507 #if 0
1508 if (1) {
1509 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1510 cpu_single_env->mem_io_vaddr, len,
1511 cpu_single_env->eip,
1512 cpu_single_env->eip +
1513 (intptr_t)cpu_single_env->segs[R_CS].base);
1515 #endif
1516 p = page_find(start >> TARGET_PAGE_BITS);
1517 if (!p) {
1518 return;
1520 if (!p->code_bitmap &&
1521 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1522 /* build code bitmap. FIXME: writes should be protected by
1523 * tb_lock, reads by tb_lock or RCU.
1525 build_page_bitmap(p);
1527 if (p->code_bitmap) {
1528 unsigned int nr;
1529 unsigned long b;
1531 nr = start & ~TARGET_PAGE_MASK;
1532 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1533 if (b & ((1 << len) - 1)) {
1534 goto do_invalidate;
1536 } else {
1537 do_invalidate:
1538 tb_invalidate_phys_page_range(start, start + len, 1);
1541 #else
1542 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1543 * host PC of the faulting store instruction that caused this invalidate.
1544 * Returns true if the caller needs to abort execution of the current
1545 * TB (because it was modified by this store and the guest CPU has
1546 * precise-SMC semantics).
1548 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1550 TranslationBlock *tb;
1551 PageDesc *p;
1552 int n;
1553 #ifdef TARGET_HAS_PRECISE_SMC
1554 TranslationBlock *current_tb = NULL;
1555 CPUState *cpu = current_cpu;
1556 CPUArchState *env = NULL;
1557 int current_tb_modified = 0;
1558 target_ulong current_pc = 0;
1559 target_ulong current_cs_base = 0;
1560 uint32_t current_flags = 0;
1561 #endif
1563 addr &= TARGET_PAGE_MASK;
1564 p = page_find(addr >> TARGET_PAGE_BITS);
1565 if (!p) {
1566 return false;
1568 tb = p->first_tb;
1569 #ifdef TARGET_HAS_PRECISE_SMC
1570 if (tb && pc != 0) {
1571 current_tb = tb_find_pc(pc);
1573 if (cpu != NULL) {
1574 env = cpu->env_ptr;
1576 #endif
1577 while (tb != NULL) {
1578 n = (uintptr_t)tb & 3;
1579 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1580 #ifdef TARGET_HAS_PRECISE_SMC
1581 if (current_tb == tb &&
1582 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1583 /* If we are modifying the current TB, we must stop
1584 its execution. We could be more precise by checking
1585 that the modification is after the current PC, but it
1586 would require a specialized function to partially
1587 restore the CPU state */
1589 current_tb_modified = 1;
1590 cpu_restore_state_from_tb(cpu, current_tb, pc);
1591 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1592 &current_flags);
1594 #endif /* TARGET_HAS_PRECISE_SMC */
1595 tb_phys_invalidate(tb, addr);
1596 tb = tb->page_next[n];
1598 p->first_tb = NULL;
1599 #ifdef TARGET_HAS_PRECISE_SMC
1600 if (current_tb_modified) {
1601 /* we generate a block containing just the instruction
1602 modifying the memory. It will ensure that it cannot modify
1603 itself */
1604 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1605 return true;
1607 #endif
1608 return false;
1610 #endif
1612 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1613 tb[1].tc_ptr. Return NULL if not found */
1614 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1616 int m_min, m_max, m;
1617 uintptr_t v;
1618 TranslationBlock *tb;
1620 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1621 return NULL;
1623 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1624 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1625 return NULL;
1627 /* binary search (cf Knuth) */
1628 m_min = 0;
1629 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1630 while (m_min <= m_max) {
1631 m = (m_min + m_max) >> 1;
1632 tb = &tcg_ctx.tb_ctx.tbs[m];
1633 v = (uintptr_t)tb->tc_ptr;
1634 if (v == tc_ptr) {
1635 return tb;
1636 } else if (tc_ptr < v) {
1637 m_max = m - 1;
1638 } else {
1639 m_min = m + 1;
1642 return &tcg_ctx.tb_ctx.tbs[m_max];
1645 #if !defined(CONFIG_USER_ONLY)
1646 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1648 ram_addr_t ram_addr;
1649 MemoryRegion *mr;
1650 hwaddr l = 1;
1652 rcu_read_lock();
1653 mr = address_space_translate(as, addr, &addr, &l, false);
1654 if (!(memory_region_is_ram(mr)
1655 || memory_region_is_romd(mr))) {
1656 rcu_read_unlock();
1657 return;
1659 ram_addr = memory_region_get_ram_addr(mr) + addr;
1660 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1661 rcu_read_unlock();
1663 #endif /* !defined(CONFIG_USER_ONLY) */
1665 /* Called with tb_lock held. */
1666 void tb_check_watchpoint(CPUState *cpu)
1668 TranslationBlock *tb;
1670 tb = tb_find_pc(cpu->mem_io_pc);
1671 if (tb) {
1672 /* We can use retranslation to find the PC. */
1673 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1674 tb_phys_invalidate(tb, -1);
1675 } else {
1676 /* The exception probably happened in a helper. The CPU state should
1677 have been saved before calling it. Fetch the PC from there. */
1678 CPUArchState *env = cpu->env_ptr;
1679 target_ulong pc, cs_base;
1680 tb_page_addr_t addr;
1681 uint32_t flags;
1683 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1684 addr = get_page_addr_code(env, pc);
1685 tb_invalidate_phys_range(addr, addr + 1);
1689 #ifndef CONFIG_USER_ONLY
1690 /* in deterministic execution mode, instructions doing device I/Os
1691 must be at the end of the TB */
1692 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1694 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1695 CPUArchState *env = cpu->env_ptr;
1696 #endif
1697 TranslationBlock *tb;
1698 uint32_t n, cflags;
1699 target_ulong pc, cs_base;
1700 uint32_t flags;
1702 tb = tb_find_pc(retaddr);
1703 if (!tb) {
1704 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1705 (void *)retaddr);
1707 n = cpu->icount_decr.u16.low + tb->icount;
1708 cpu_restore_state_from_tb(cpu, tb, retaddr);
1709 /* Calculate how many instructions had been executed before the fault
1710 occurred. */
1711 n = n - cpu->icount_decr.u16.low;
1712 /* Generate a new TB ending on the I/O insn. */
1713 n++;
1714 /* On MIPS and SH, delay slot instructions can only be restarted if
1715 they were already the first instruction in the TB. If this is not
1716 the first instruction in a TB then re-execute the preceding
1717 branch. */
1718 #if defined(TARGET_MIPS)
1719 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1720 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1721 cpu->icount_decr.u16.low++;
1722 env->hflags &= ~MIPS_HFLAG_BMASK;
1724 #elif defined(TARGET_SH4)
1725 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1726 && n > 1) {
1727 env->pc -= 2;
1728 cpu->icount_decr.u16.low++;
1729 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1731 #endif
1732 /* This should never happen. */
1733 if (n > CF_COUNT_MASK) {
1734 cpu_abort(cpu, "TB too big during recompile");
1737 cflags = n | CF_LAST_IO;
1738 pc = tb->pc;
1739 cs_base = tb->cs_base;
1740 flags = tb->flags;
1741 tb_phys_invalidate(tb, -1);
1742 if (tb->cflags & CF_NOCACHE) {
1743 if (tb->orig_tb) {
1744 /* Invalidate original TB if this TB was generated in
1745 * cpu_exec_nocache() */
1746 tb_phys_invalidate(tb->orig_tb, -1);
1748 tb_free(tb);
1750 /* FIXME: In theory this could raise an exception. In practice
1751 we have already translated the block once so it's probably ok. */
1752 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1753 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1754 the first in the TB) then we end up generating a whole new TB and
1755 repeating the fault, which is horribly inefficient.
1756 Better would be to execute just this insn uncached, or generate a
1757 second new TB. */
1758 cpu_loop_exit_noexc(cpu);
1761 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1763 unsigned int i;
1765 /* Discard jump cache entries for any tb which might potentially
1766 overlap the flushed page. */
1767 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1768 memset(&cpu->tb_jmp_cache[i], 0,
1769 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1771 i = tb_jmp_cache_hash_page(addr);
1772 memset(&cpu->tb_jmp_cache[i], 0,
1773 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1776 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1777 struct qht_stats hst)
1779 uint32_t hgram_opts;
1780 size_t hgram_bins;
1781 char *hgram;
1783 if (!hst.head_buckets) {
1784 return;
1786 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1787 hst.used_head_buckets, hst.head_buckets,
1788 (double)hst.used_head_buckets / hst.head_buckets * 100);
1790 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1791 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1792 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1793 hgram_opts |= QDIST_PR_NODECIMAL;
1795 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1796 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1797 qdist_avg(&hst.occupancy) * 100, hgram);
1798 g_free(hgram);
1800 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1801 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1802 if (hgram_bins > 10) {
1803 hgram_bins = 10;
1804 } else {
1805 hgram_bins = 0;
1806 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1808 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1809 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1810 qdist_avg(&hst.chain), hgram);
1811 g_free(hgram);
1814 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1816 int i, target_code_size, max_target_code_size;
1817 int direct_jmp_count, direct_jmp2_count, cross_page;
1818 TranslationBlock *tb;
1819 struct qht_stats hst;
1821 target_code_size = 0;
1822 max_target_code_size = 0;
1823 cross_page = 0;
1824 direct_jmp_count = 0;
1825 direct_jmp2_count = 0;
1826 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1827 tb = &tcg_ctx.tb_ctx.tbs[i];
1828 target_code_size += tb->size;
1829 if (tb->size > max_target_code_size) {
1830 max_target_code_size = tb->size;
1832 if (tb->page_addr[1] != -1) {
1833 cross_page++;
1835 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1836 direct_jmp_count++;
1837 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1838 direct_jmp2_count++;
1842 /* XXX: avoid using doubles ? */
1843 cpu_fprintf(f, "Translation buffer state:\n");
1844 cpu_fprintf(f, "gen code size %td/%zd\n",
1845 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1846 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1847 cpu_fprintf(f, "TB count %d/%d\n",
1848 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1849 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1850 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1851 tcg_ctx.tb_ctx.nb_tbs : 0,
1852 max_target_code_size);
1853 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1854 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1855 tcg_ctx.code_gen_buffer) /
1856 tcg_ctx.tb_ctx.nb_tbs : 0,
1857 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1858 tcg_ctx.code_gen_buffer) /
1859 target_code_size : 0);
1860 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1861 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1862 tcg_ctx.tb_ctx.nb_tbs : 0);
1863 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1864 direct_jmp_count,
1865 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1866 tcg_ctx.tb_ctx.nb_tbs : 0,
1867 direct_jmp2_count,
1868 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1869 tcg_ctx.tb_ctx.nb_tbs : 0);
1871 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1872 print_qht_statistics(f, cpu_fprintf, hst);
1873 qht_statistics_destroy(&hst);
1875 cpu_fprintf(f, "\nStatistics:\n");
1876 cpu_fprintf(f, "TB flush count %u\n",
1877 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1878 cpu_fprintf(f, "TB invalidate count %d\n",
1879 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1880 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1881 tcg_dump_info(f, cpu_fprintf);
1884 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1886 tcg_dump_op_count(f, cpu_fprintf);
1889 #else /* CONFIG_USER_ONLY */
1891 void cpu_interrupt(CPUState *cpu, int mask)
1893 cpu->interrupt_request |= mask;
1894 cpu->tcg_exit_req = 1;
1898 * Walks guest process memory "regions" one by one
1899 * and calls callback function 'fn' for each region.
1901 struct walk_memory_regions_data {
1902 walk_memory_regions_fn fn;
1903 void *priv;
1904 target_ulong start;
1905 int prot;
1908 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1909 target_ulong end, int new_prot)
1911 if (data->start != -1u) {
1912 int rc = data->fn(data->priv, data->start, end, data->prot);
1913 if (rc != 0) {
1914 return rc;
1918 data->start = (new_prot ? end : -1u);
1919 data->prot = new_prot;
1921 return 0;
1924 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1925 target_ulong base, int level, void **lp)
1927 target_ulong pa;
1928 int i, rc;
1930 if (*lp == NULL) {
1931 return walk_memory_regions_end(data, base, 0);
1934 if (level == 0) {
1935 PageDesc *pd = *lp;
1937 for (i = 0; i < V_L2_SIZE; ++i) {
1938 int prot = pd[i].flags;
1940 pa = base | (i << TARGET_PAGE_BITS);
1941 if (prot != data->prot) {
1942 rc = walk_memory_regions_end(data, pa, prot);
1943 if (rc != 0) {
1944 return rc;
1948 } else {
1949 void **pp = *lp;
1951 for (i = 0; i < V_L2_SIZE; ++i) {
1952 pa = base | ((target_ulong)i <<
1953 (TARGET_PAGE_BITS + V_L2_BITS * level));
1954 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1955 if (rc != 0) {
1956 return rc;
1961 return 0;
1964 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1966 struct walk_memory_regions_data data;
1967 uintptr_t i, l1_sz = v_l1_size;
1969 data.fn = fn;
1970 data.priv = priv;
1971 data.start = -1u;
1972 data.prot = 0;
1974 for (i = 0; i < l1_sz; i++) {
1975 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
1976 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
1977 if (rc != 0) {
1978 return rc;
1982 return walk_memory_regions_end(&data, 0, 0);
1985 static int dump_region(void *priv, target_ulong start,
1986 target_ulong end, unsigned long prot)
1988 FILE *f = (FILE *)priv;
1990 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1991 " "TARGET_FMT_lx" %c%c%c\n",
1992 start, end, end - start,
1993 ((prot & PAGE_READ) ? 'r' : '-'),
1994 ((prot & PAGE_WRITE) ? 'w' : '-'),
1995 ((prot & PAGE_EXEC) ? 'x' : '-'));
1997 return 0;
2000 /* dump memory mappings */
2001 void page_dump(FILE *f)
2003 const int length = sizeof(target_ulong) * 2;
2004 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2005 length, "start", length, "end", length, "size", "prot");
2006 walk_memory_regions(f, dump_region);
2009 int page_get_flags(target_ulong address)
2011 PageDesc *p;
2013 p = page_find(address >> TARGET_PAGE_BITS);
2014 if (!p) {
2015 return 0;
2017 return p->flags;
2020 /* Modify the flags of a page and invalidate the code if necessary.
2021 The flag PAGE_WRITE_ORG is set automatically depending
2022 on PAGE_WRITE. The mmap_lock should already be held. */
2023 void page_set_flags(target_ulong start, target_ulong end, int flags)
2025 target_ulong addr, len;
2027 /* This function should never be called with addresses outside the
2028 guest address space. If this assert fires, it probably indicates
2029 a missing call to h2g_valid. */
2030 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2031 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2032 #endif
2033 assert(start < end);
2035 start = start & TARGET_PAGE_MASK;
2036 end = TARGET_PAGE_ALIGN(end);
2038 if (flags & PAGE_WRITE) {
2039 flags |= PAGE_WRITE_ORG;
2042 for (addr = start, len = end - start;
2043 len != 0;
2044 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2045 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2047 /* If the write protection bit is set, then we invalidate
2048 the code inside. */
2049 if (!(p->flags & PAGE_WRITE) &&
2050 (flags & PAGE_WRITE) &&
2051 p->first_tb) {
2052 tb_invalidate_phys_page(addr, 0);
2054 p->flags = flags;
2058 int page_check_range(target_ulong start, target_ulong len, int flags)
2060 PageDesc *p;
2061 target_ulong end;
2062 target_ulong addr;
2064 /* This function should never be called with addresses outside the
2065 guest address space. If this assert fires, it probably indicates
2066 a missing call to h2g_valid. */
2067 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2068 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2069 #endif
2071 if (len == 0) {
2072 return 0;
2074 if (start + len - 1 < start) {
2075 /* We've wrapped around. */
2076 return -1;
2079 /* must do before we lose bits in the next step */
2080 end = TARGET_PAGE_ALIGN(start + len);
2081 start = start & TARGET_PAGE_MASK;
2083 for (addr = start, len = end - start;
2084 len != 0;
2085 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2086 p = page_find(addr >> TARGET_PAGE_BITS);
2087 if (!p) {
2088 return -1;
2090 if (!(p->flags & PAGE_VALID)) {
2091 return -1;
2094 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2095 return -1;
2097 if (flags & PAGE_WRITE) {
2098 if (!(p->flags & PAGE_WRITE_ORG)) {
2099 return -1;
2101 /* unprotect the page if it was put read-only because it
2102 contains translated code */
2103 if (!(p->flags & PAGE_WRITE)) {
2104 if (!page_unprotect(addr, 0)) {
2105 return -1;
2110 return 0;
2113 /* called from signal handler: invalidate the code and unprotect the
2114 * page. Return 0 if the fault was not handled, 1 if it was handled,
2115 * and 2 if it was handled but the caller must cause the TB to be
2116 * immediately exited. (We can only return 2 if the 'pc' argument is
2117 * non-zero.)
2119 int page_unprotect(target_ulong address, uintptr_t pc)
2121 unsigned int prot;
2122 bool current_tb_invalidated;
2123 PageDesc *p;
2124 target_ulong host_start, host_end, addr;
2126 /* Technically this isn't safe inside a signal handler. However we
2127 know this only ever happens in a synchronous SEGV handler, so in
2128 practice it seems to be ok. */
2129 mmap_lock();
2131 p = page_find(address >> TARGET_PAGE_BITS);
2132 if (!p) {
2133 mmap_unlock();
2134 return 0;
2137 /* if the page was really writable, then we change its
2138 protection back to writable */
2139 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2140 host_start = address & qemu_host_page_mask;
2141 host_end = host_start + qemu_host_page_size;
2143 prot = 0;
2144 current_tb_invalidated = false;
2145 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2146 p = page_find(addr >> TARGET_PAGE_BITS);
2147 p->flags |= PAGE_WRITE;
2148 prot |= p->flags;
2150 /* and since the content will be modified, we must invalidate
2151 the corresponding translated code. */
2152 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2153 #ifdef DEBUG_TB_CHECK
2154 tb_invalidate_check(addr);
2155 #endif
2157 mprotect((void *)g2h(host_start), qemu_host_page_size,
2158 prot & PAGE_BITS);
2160 mmap_unlock();
2161 /* If current TB was invalidated return to main loop */
2162 return current_tb_invalidated ? 2 : 1;
2164 mmap_unlock();
2165 return 0;
2167 #endif /* CONFIG_USER_ONLY */