[qemu-kvm/fedora.git] / exec.c (blob 37a956b51600aa9183f2c9400f67bde04105217c)
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
40 #include "tcg.h"
41 #include "qemu-kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
94 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
95 uint8_t *code_gen_buffer;
96 unsigned long code_gen_buffer_size;
97 /* threshold to flush the translated code buffer */
98 unsigned long code_gen_buffer_max_size;
99 uint8_t *code_gen_ptr;
101 ram_addr_t phys_ram_size;
102 int phys_ram_fd;
103 uint8_t *phys_ram_base;
104 uint8_t *phys_ram_dirty;
105 uint8_t *bios_mem;
106 static int in_migration;
107 static ram_addr_t phys_ram_alloc_offset = 0;
109 CPUState *first_cpu;
110 /* current CPU in the current thread. It is only valid inside
111 cpu_exec() */
112 CPUState *cpu_single_env;
114 typedef struct PageDesc {
115 /* list of TBs intersecting this ram page */
116 TranslationBlock *first_tb;
117 /* in order to optimize self modifying code, we count the number
118 of lookups we do to a given page to use a bitmap */
119 unsigned int code_write_count;
120 uint8_t *code_bitmap;
121 #if defined(CONFIG_USER_ONLY)
122 unsigned long flags;
123 #endif
124 } PageDesc;
126 typedef struct PhysPageDesc {
127 /* offset in host memory of the page + io_index in the low 12 bits */
128 ram_addr_t phys_offset;
129 } PhysPageDesc;
131 #define L2_BITS 10
132 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
133 /* XXX: this is a temporary hack for the alpha target.
134  * In the future, this is to be replaced by a multi-level table
135  * to actually be able to handle the complete 64-bit address space. */
137 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
138 #else
139 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
140 #endif
142 #define L1_SIZE (1 << L1_BITS)
143 #define L2_SIZE (1 << L2_BITS)
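/* Illustrative sketch (not part of the original file): how a target page
   index is split across the two lookup levels used by l1_map/page_find()
   below. Assumes TARGET_PAGE_BITS == 12 (4 KiB pages) and the 32-bit layout
   above, so L1_BITS == 10 and L2_BITS == 10. */
#if 0
static void example_page_index_split(void)
{
    unsigned long addr = 0x12345678;
    unsigned long page_index = addr >> 12;                /* 0x12345 */
    unsigned long l1_index = page_index >> L2_BITS;       /* 0x48, slot in l1_map */
    unsigned long l2_index = page_index & (L2_SIZE - 1);  /* 0x345, slot in the PageDesc array */

    /* page_find(page_index) == l1_map[l1_index] + l2_index once allocated */
    (void)l1_index; (void)l2_index;
}
#endif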
145 static void io_mem_init(void);
147 unsigned long qemu_real_host_page_size;
148 unsigned long qemu_host_page_bits;
149 unsigned long qemu_host_page_size;
150 unsigned long qemu_host_page_mask;
152 /* XXX: for system emulation, it could just be an array */
153 static PageDesc *l1_map[L1_SIZE];
154 PhysPageDesc **l1_phys_map;
156 /* io memory support */
157 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
158 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
159 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
160 char io_mem_used[IO_MEM_NB_ENTRIES];
161 #if defined(CONFIG_SOFTMMU)
162 static int io_mem_watch;
163 #endif
165 /* log support */
166 char *logfilename = "/tmp/qemu.log";
167 FILE *logfile;
168 int loglevel;
169 static int log_append = 0;
171 /* statistics */
172 static int tlb_flush_count;
173 static int tb_flush_count;
174 static int tb_phys_invalidate_count;
176 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
177 typedef struct subpage_t {
178 target_phys_addr_t base;
179 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
180 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
181 void *opaque[TARGET_PAGE_SIZE][2][4];
182 } subpage_t;
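/* Illustrative sketch (not part of the original file): SUBPAGE_IDX() keeps
   only the offset inside the target page, which is what indexes the per-byte
   dispatch tables of subpage_t above. Values assume 4 KiB target pages. */
#if 0
static void example_subpage_idx(void)
{
    unsigned int off1 = SUBPAGE_IDX(0x10234);   /* == 0x234 */
    unsigned int off2 = SUBPAGE_IDX(0x10000);   /* == 0x000 */
    (void)off1; (void)off2;
}
#endif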
184 #ifdef _WIN32
185 static void map_exec(void *addr, long size)
187 DWORD old_protect;
188 VirtualProtect(addr, size,
189 PAGE_EXECUTE_READWRITE, &old_protect);
192 #else
193 static void map_exec(void *addr, long size)
195 unsigned long start, end, page_size;
197 page_size = getpagesize();
198 start = (unsigned long)addr;
199 start &= ~(page_size - 1);
201 end = (unsigned long)addr + size;
202 end += page_size - 1;
203 end &= ~(page_size - 1);
205 mprotect((void *)start, end - start,
206 PROT_READ | PROT_WRITE | PROT_EXEC);
208 #endif
210 static void page_init(void)
212 /* NOTE: we can always suppose that qemu_host_page_size >=
213 TARGET_PAGE_SIZE */
214 #ifdef _WIN32
216 SYSTEM_INFO system_info;
217 DWORD old_protect;
219 GetSystemInfo(&system_info);
220 qemu_real_host_page_size = system_info.dwPageSize;
222 #else
223 qemu_real_host_page_size = getpagesize();
224 #endif
225 if (qemu_host_page_size == 0)
226 qemu_host_page_size = qemu_real_host_page_size;
227 if (qemu_host_page_size < TARGET_PAGE_SIZE)
228 qemu_host_page_size = TARGET_PAGE_SIZE;
229 qemu_host_page_bits = 0;
230 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
231 qemu_host_page_bits++;
232 qemu_host_page_mask = ~(qemu_host_page_size - 1);
233 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
234 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
236 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
238 long long startaddr, endaddr;
239 FILE *f;
240 int n;
242 mmap_lock();
243 last_brk = (unsigned long)sbrk(0);
244 f = fopen("/proc/self/maps", "r");
245 if (f) {
246 do {
247 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
248 if (n == 2) {
249 startaddr = MIN(startaddr,
250 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
251 endaddr = MIN(endaddr,
252 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
253 page_set_flags(startaddr & TARGET_PAGE_MASK,
254 TARGET_PAGE_ALIGN(endaddr),
255 PAGE_RESERVED);
257 } while (!feof(f));
258 fclose(f);
260 mmap_unlock();
262 #endif
265 static inline PageDesc *page_find_alloc(target_ulong index)
267 PageDesc **lp, *p;
269 lp = &l1_map[index >> L2_BITS];
270 p = *lp;
271 if (!p) {
272 /* allocate if not found */
273 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
274 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
275 *lp = p;
277 return p + (index & (L2_SIZE - 1));
280 static inline PageDesc *page_find(target_ulong index)
282 PageDesc *p;
284 p = l1_map[index >> L2_BITS];
285 if (!p)
286 return 0;
287 return p + (index & (L2_SIZE - 1));
290 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
292 void **lp, **p;
293 PhysPageDesc *pd;
295 p = (void **)l1_phys_map;
296 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
298 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
299 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
300 #endif
301 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
302 p = *lp;
303 if (!p) {
304 /* allocate if not found */
305 if (!alloc)
306 return NULL;
307 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
308 memset(p, 0, sizeof(void *) * L1_SIZE);
309 *lp = p;
311 #endif
312 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
313 pd = *lp;
314 if (!pd) {
315 int i;
316 /* allocate if not found */
317 if (!alloc)
318 return NULL;
319 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
320 *lp = pd;
321 for (i = 0; i < L2_SIZE; i++)
322 pd[i].phys_offset = IO_MEM_UNASSIGNED;
324 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
327 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
329 return phys_page_find_alloc(index, 0);
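/* Illustrative sketch (not part of the original file): looking up the
   descriptor of a guest physical page. phys_offset combines the page's RAM
   offset with an io_index in the low bits, so masking with ~TARGET_PAGE_MASK
   distinguishes plain RAM from I/O or unassigned pages (the same test used
   by tlb_set_page_exec() further down). */
#if 0
static int example_page_is_plain_ram(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}
#endif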
332 #if !defined(CONFIG_USER_ONLY)
333 static void tlb_protect_code(ram_addr_t ram_addr);
334 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
335 target_ulong vaddr);
336 #define mmap_lock() do { } while(0)
337 #define mmap_unlock() do { } while(0)
338 #endif
340 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
342 #if defined(CONFIG_USER_ONLY)
343 /* Currently it is not recommended to allocate big chunks of data in
344    user mode. This will change when a dedicated libc is used. */
345 #define USE_STATIC_CODE_GEN_BUFFER
346 #endif
348 #ifdef USE_STATIC_CODE_GEN_BUFFER
349 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
350 #endif
352 void code_gen_alloc(unsigned long tb_size)
354 #ifdef USE_STATIC_CODE_GEN_BUFFER
355 code_gen_buffer = static_code_gen_buffer;
356 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
357 map_exec(code_gen_buffer, code_gen_buffer_size);
358 #else
359 code_gen_buffer_size = tb_size;
360 if (code_gen_buffer_size == 0) {
361 #if defined(CONFIG_USER_ONLY)
362 /* in user mode, phys_ram_size is not meaningful */
363 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
364 #else
365 /* XXX: needs adjustments */
366 code_gen_buffer_size = (int)(phys_ram_size / 4);
367 #endif
369 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
370 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
371 /* The code gen buffer location may have constraints depending on
372 the host cpu and OS */
373 #if defined(__linux__)
375 int flags;
376 flags = MAP_PRIVATE | MAP_ANONYMOUS;
377 #if defined(__x86_64__)
378 flags |= MAP_32BIT;
379 /* Cannot map more than that */
380 if (code_gen_buffer_size > (800 * 1024 * 1024))
381 code_gen_buffer_size = (800 * 1024 * 1024);
382 #endif
383 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
384 PROT_WRITE | PROT_READ | PROT_EXEC,
385 flags, -1, 0);
386 if (code_gen_buffer == MAP_FAILED) {
387 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
388 exit(1);
391 #else
392 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
393 if (!code_gen_buffer) {
394 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
395 exit(1);
397 map_exec(code_gen_buffer, code_gen_buffer_size);
398 #endif
399 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
400 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
401 code_gen_buffer_max_size = code_gen_buffer_size -
402 code_gen_max_block_size();
403 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
404 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
407 /* Must be called before using the QEMU cpus. 'tb_size' is the size
408 (in bytes) allocated to the translation buffer. Zero means default
409 size. */
410 void cpu_exec_init_all(unsigned long tb_size)
412 cpu_gen_init();
413 code_gen_alloc(tb_size);
414 code_gen_ptr = code_gen_buffer;
415 page_init();
416 io_mem_init();
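/* Illustrative sketch (not part of the original file): typical start-up
   order implied by the comment above — the translation buffer and page
   tables must exist before any CPU is created. A tb_size of 0 requests the
   default buffer size; the "qemu32" model string is only an example. */
#if 0
static void example_startup(void)
{
    cpu_exec_init_all(0);       /* code buffer, page tables, io mem */
    /* CPUState *env = cpu_init("qemu32");   then create the CPUs */
}
#endif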
419 void cpu_exec_init(CPUState *env)
421 CPUState **penv;
422 int cpu_index;
424 env->next_cpu = NULL;
425 penv = &first_cpu;
426 cpu_index = 0;
427 while (*penv != NULL) {
428 penv = (CPUState **)&(*penv)->next_cpu;
429 cpu_index++;
431 env->cpu_index = cpu_index;
432 env->nb_watchpoints = 0;
433 #ifdef _WIN32
434 env->thread_id = GetCurrentProcessId();
435 #else
436 env->thread_id = getpid();
437 #endif
438 *penv = env;
441 static inline void invalidate_page_bitmap(PageDesc *p)
443 if (p->code_bitmap) {
444 qemu_free(p->code_bitmap);
445 p->code_bitmap = NULL;
447 p->code_write_count = 0;
450 /* set to NULL all the 'first_tb' fields in all PageDescs */
451 static void page_flush_tb(void)
453 int i, j;
454 PageDesc *p;
456 for(i = 0; i < L1_SIZE; i++) {
457 p = l1_map[i];
458 if (p) {
459 for(j = 0; j < L2_SIZE; j++) {
460 p->first_tb = NULL;
461 invalidate_page_bitmap(p);
462 p++;
468 /* flush all the translation blocks */
469 /* XXX: tb_flush is currently not thread safe */
470 void tb_flush(CPUState *env1)
472 CPUState *env;
473 #if defined(DEBUG_FLUSH)
474 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
475 (unsigned long)(code_gen_ptr - code_gen_buffer),
476 nb_tbs, nb_tbs > 0 ?
477 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
478 #endif
479 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
480 cpu_abort(env1, "Internal error: code buffer overflow\n");
482 nb_tbs = 0;
484 for(env = first_cpu; env != NULL; env = env->next_cpu) {
485 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
488 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
489 page_flush_tb();
491 code_gen_ptr = code_gen_buffer;
492 /* XXX: flush processor icache at this point if cache flush is
493 expensive */
494 tb_flush_count++;
497 #ifdef DEBUG_TB_CHECK
499 static void tb_invalidate_check(target_ulong address)
501 TranslationBlock *tb;
502 int i;
503 address &= TARGET_PAGE_MASK;
504 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
505 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
506 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
507 address >= tb->pc + tb->size)) {
508 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
509 address, (long)tb->pc, tb->size);
515 /* verify that all the pages have correct rights for code */
516 static void tb_page_check(void)
518 TranslationBlock *tb;
519 int i, flags1, flags2;
521 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
522 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
523 flags1 = page_get_flags(tb->pc);
524 flags2 = page_get_flags(tb->pc + tb->size - 1);
525 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
526 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
527 (long)tb->pc, tb->size, flags1, flags2);
533 void tb_jmp_check(TranslationBlock *tb)
535 TranslationBlock *tb1;
536 unsigned int n1;
538 /* suppress any remaining jumps to this TB */
539 tb1 = tb->jmp_first;
540 for(;;) {
541 n1 = (long)tb1 & 3;
542 tb1 = (TranslationBlock *)((long)tb1 & ~3);
543 if (n1 == 2)
544 break;
545 tb1 = tb1->jmp_next[n1];
547 /* check end of list */
548 if (tb1 != tb) {
549 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
553 #endif
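/* Illustrative sketch (not part of the original file): the TB page and jump
   lists in this file store a 2-bit tag in the low bits of each
   TranslationBlock pointer. Tags 0 and 1 select which jmp_next[]/page_next[]
   slot of the tagged TB continues the list; tag 2 marks the owning TB
   itself, i.e. the end of the circular jump list. */
#if 0
static TranslationBlock *example_untag(TranslationBlock *tagged, int *tag)
{
    *tag = (long)tagged & 3;                         /* 0, 1 or 2 */
    return (TranslationBlock *)((long)tagged & ~3);  /* real pointer */
}
#endif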
555 /* invalidate one TB */
556 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
557 int next_offset)
559 TranslationBlock *tb1;
560 for(;;) {
561 tb1 = *ptb;
562 if (tb1 == tb) {
563 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
564 break;
566 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
570 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
572 TranslationBlock *tb1;
573 unsigned int n1;
575 for(;;) {
576 tb1 = *ptb;
577 n1 = (long)tb1 & 3;
578 tb1 = (TranslationBlock *)((long)tb1 & ~3);
579 if (tb1 == tb) {
580 *ptb = tb1->page_next[n1];
581 break;
583 ptb = &tb1->page_next[n1];
587 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
589 TranslationBlock *tb1, **ptb;
590 unsigned int n1;
592 ptb = &tb->jmp_next[n];
593 tb1 = *ptb;
594 if (tb1) {
595 /* find tb(n) in circular list */
596 for(;;) {
597 tb1 = *ptb;
598 n1 = (long)tb1 & 3;
599 tb1 = (TranslationBlock *)((long)tb1 & ~3);
600 if (n1 == n && tb1 == tb)
601 break;
602 if (n1 == 2) {
603 ptb = &tb1->jmp_first;
604 } else {
605 ptb = &tb1->jmp_next[n1];
608 /* now we can suppress tb(n) from the list */
609 *ptb = tb->jmp_next[n];
611 tb->jmp_next[n] = NULL;
615 /* reset the jump entry 'n' of a TB so that it is not chained to
616 another TB */
617 static inline void tb_reset_jump(TranslationBlock *tb, int n)
619 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
622 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
624 CPUState *env;
625 PageDesc *p;
626 unsigned int h, n1;
627 target_phys_addr_t phys_pc;
628 TranslationBlock *tb1, *tb2;
630 /* remove the TB from the hash list */
631 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
632 h = tb_phys_hash_func(phys_pc);
633 tb_remove(&tb_phys_hash[h], tb,
634 offsetof(TranslationBlock, phys_hash_next));
636 /* remove the TB from the page list */
637 if (tb->page_addr[0] != page_addr) {
638 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
639 tb_page_remove(&p->first_tb, tb);
640 invalidate_page_bitmap(p);
642 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
643 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
644 tb_page_remove(&p->first_tb, tb);
645 invalidate_page_bitmap(p);
648 tb_invalidated_flag = 1;
650 /* remove the TB from the hash list */
651 h = tb_jmp_cache_hash_func(tb->pc);
652 for(env = first_cpu; env != NULL; env = env->next_cpu) {
653 if (env->tb_jmp_cache[h] == tb)
654 env->tb_jmp_cache[h] = NULL;
657 /* suppress this TB from the two jump lists */
658 tb_jmp_remove(tb, 0);
659 tb_jmp_remove(tb, 1);
661 /* suppress any remaining jumps to this TB */
662 tb1 = tb->jmp_first;
663 for(;;) {
664 n1 = (long)tb1 & 3;
665 if (n1 == 2)
666 break;
667 tb1 = (TranslationBlock *)((long)tb1 & ~3);
668 tb2 = tb1->jmp_next[n1];
669 tb_reset_jump(tb1, n1);
670 tb1->jmp_next[n1] = NULL;
671 tb1 = tb2;
673 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
675 tb_phys_invalidate_count++;
678 static inline void set_bits(uint8_t *tab, int start, int len)
680 int end, mask, end1;
682 end = start + len;
683 tab += start >> 3;
684 mask = 0xff << (start & 7);
685 if ((start & ~7) == (end & ~7)) {
686 if (start < end) {
687 mask &= ~(0xff << (end & 7));
688 *tab |= mask;
690 } else {
691 *tab++ |= mask;
692 start = (start + 8) & ~7;
693 end1 = end & ~7;
694 while (start < end1) {
695 *tab++ = 0xff;
696 start += 8;
698 if (start < end) {
699 mask = ~(0xff << (end & 7));
700 *tab |= mask;
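/* Illustrative sketch (not part of the original file): set_bits() marks a
   run of bits in a byte-array bitmap. Starting from a zeroed bitmap,
   set_bits(tab, 5, 10) sets bits 5..14. */
#if 0
static void example_set_bits(void)
{
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 5, 10);
    /* tab[0] == 0xe0 (bits 5..7), tab[1] == 0x7f (bits 8..14) */
}
#endif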
705 static void build_page_bitmap(PageDesc *p)
707 int n, tb_start, tb_end;
708 TranslationBlock *tb;
710 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
711 if (!p->code_bitmap)
712 return;
713 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
715 tb = p->first_tb;
716 while (tb != NULL) {
717 n = (long)tb & 3;
718 tb = (TranslationBlock *)((long)tb & ~3);
719 /* NOTE: this is subtle as a TB may span two physical pages */
720 if (n == 0) {
721 /* NOTE: tb_end may be after the end of the page, but
722 it is not a problem */
723 tb_start = tb->pc & ~TARGET_PAGE_MASK;
724 tb_end = tb_start + tb->size;
725 if (tb_end > TARGET_PAGE_SIZE)
726 tb_end = TARGET_PAGE_SIZE;
727 } else {
728 tb_start = 0;
729 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
731 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
732 tb = tb->page_next[n];
736 #ifdef TARGET_HAS_PRECISE_SMC
738 static void tb_gen_code(CPUState *env,
739 target_ulong pc, target_ulong cs_base, int flags,
740 int cflags)
742 TranslationBlock *tb;
743 uint8_t *tc_ptr;
744 target_ulong phys_pc, phys_page2, virt_page2;
745 int code_gen_size;
747 phys_pc = get_phys_addr_code(env, pc);
748 tb = tb_alloc(pc);
749 if (!tb) {
750 /* flush must be done */
751 tb_flush(env);
752 /* cannot fail at this point */
753 tb = tb_alloc(pc);
755 tc_ptr = code_gen_ptr;
756 tb->tc_ptr = tc_ptr;
757 tb->cs_base = cs_base;
758 tb->flags = flags;
759 tb->cflags = cflags;
760 cpu_gen_code(env, tb, &code_gen_size);
761 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
763 /* check next page if needed */
764 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
765 phys_page2 = -1;
766 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
767 phys_page2 = get_phys_addr_code(env, virt_page2);
769 tb_link_phys(tb, phys_pc, phys_page2);
771 #endif
773 /* invalidate all TBs which intersect with the target physical page
774    starting in range [start, end). NOTE: start and end must refer to
775 the same physical page. 'is_cpu_write_access' should be true if called
776 from a real cpu write access: the virtual CPU will exit the current
777 TB if code is modified inside this TB. */
778 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
779 int is_cpu_write_access)
781 int n, current_tb_modified, current_tb_not_found, current_flags;
782 CPUState *env = cpu_single_env;
783 PageDesc *p;
784 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
785 target_ulong tb_start, tb_end;
786 target_ulong current_pc, current_cs_base;
788 p = page_find(start >> TARGET_PAGE_BITS);
789 if (!p)
790 return;
791 if (!p->code_bitmap &&
792 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
793 is_cpu_write_access) {
794 /* build code bitmap */
795 build_page_bitmap(p);
798 /* we remove all the TBs in the range [start, end) */
799 /* XXX: see if in some cases it could be faster to invalidate all the code */
800 current_tb_not_found = is_cpu_write_access;
801 current_tb_modified = 0;
802 current_tb = NULL; /* avoid warning */
803 current_pc = 0; /* avoid warning */
804 current_cs_base = 0; /* avoid warning */
805 current_flags = 0; /* avoid warning */
806 tb = p->first_tb;
807 while (tb != NULL) {
808 n = (long)tb & 3;
809 tb = (TranslationBlock *)((long)tb & ~3);
810 tb_next = tb->page_next[n];
811 /* NOTE: this is subtle as a TB may span two physical pages */
812 if (n == 0) {
813 /* NOTE: tb_end may be after the end of the page, but
814 it is not a problem */
815 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
816 tb_end = tb_start + tb->size;
817 } else {
818 tb_start = tb->page_addr[1];
819 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
821 if (!(tb_end <= start || tb_start >= end)) {
822 #ifdef TARGET_HAS_PRECISE_SMC
823 if (current_tb_not_found) {
824 current_tb_not_found = 0;
825 current_tb = NULL;
826 if (env->mem_write_pc) {
827 /* now we have a real cpu fault */
828 current_tb = tb_find_pc(env->mem_write_pc);
831 if (current_tb == tb &&
832 !(current_tb->cflags & CF_SINGLE_INSN)) {
833 /* If we are modifying the current TB, we must stop
834 its execution. We could be more precise by checking
835 that the modification is after the current PC, but it
836 would require a specialized function to partially
837 restore the CPU state */
839 current_tb_modified = 1;
840 cpu_restore_state(current_tb, env,
841 env->mem_write_pc, NULL);
842 #if defined(TARGET_I386)
843 current_flags = env->hflags;
844 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
845 current_cs_base = (target_ulong)env->segs[R_CS].base;
846 current_pc = current_cs_base + env->eip;
847 #else
848 #error unsupported CPU
849 #endif
851 #endif /* TARGET_HAS_PRECISE_SMC */
852 /* we need to do that to handle the case where a signal
853 occurs while doing tb_phys_invalidate() */
854 saved_tb = NULL;
855 if (env) {
856 saved_tb = env->current_tb;
857 env->current_tb = NULL;
859 tb_phys_invalidate(tb, -1);
860 if (env) {
861 env->current_tb = saved_tb;
862 if (env->interrupt_request && env->current_tb)
863 cpu_interrupt(env, env->interrupt_request);
866 tb = tb_next;
868 #if !defined(CONFIG_USER_ONLY)
869 /* if no code remaining, no need to continue to use slow writes */
870 if (!p->first_tb) {
871 invalidate_page_bitmap(p);
872 if (is_cpu_write_access) {
873 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
876 #endif
877 #ifdef TARGET_HAS_PRECISE_SMC
878 if (current_tb_modified) {
879 /* we generate a block containing just the instruction
880 modifying the memory. It will ensure that it cannot modify
881 itself */
882 env->current_tb = NULL;
883 tb_gen_code(env, current_pc, current_cs_base, current_flags,
884 CF_SINGLE_INSN);
885 cpu_resume_from_signal(env, NULL);
887 #endif
890 /* len must be <= 8 and start must be a multiple of len */
891 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
893 PageDesc *p;
894 int offset, b;
895 #if 0
896 if (1) {
897 if (loglevel) {
898 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
899 cpu_single_env->mem_write_vaddr, len,
900 cpu_single_env->eip,
901 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
904 #endif
905 p = page_find(start >> TARGET_PAGE_BITS);
906 if (!p)
907 return;
908 if (p->code_bitmap) {
909 offset = start & ~TARGET_PAGE_MASK;
910 b = p->code_bitmap[offset >> 3] >> (offset & 7);
911 if (b & ((1 << len) - 1))
912 goto do_invalidate;
913 } else {
914 do_invalidate:
915 tb_invalidate_phys_page_range(start, start + len, 1);
919 #if !defined(CONFIG_SOFTMMU)
920 static void tb_invalidate_phys_page(target_phys_addr_t addr,
921 unsigned long pc, void *puc)
923 int n, current_flags, current_tb_modified;
924 target_ulong current_pc, current_cs_base;
925 PageDesc *p;
926 TranslationBlock *tb, *current_tb;
927 #ifdef TARGET_HAS_PRECISE_SMC
928 CPUState *env = cpu_single_env;
929 #endif
931 addr &= TARGET_PAGE_MASK;
932 p = page_find(addr >> TARGET_PAGE_BITS);
933 if (!p)
934 return;
935 tb = p->first_tb;
936 current_tb_modified = 0;
937 current_tb = NULL;
938 current_pc = 0; /* avoid warning */
939 current_cs_base = 0; /* avoid warning */
940 current_flags = 0; /* avoid warning */
941 #ifdef TARGET_HAS_PRECISE_SMC
942 if (tb && pc != 0) {
943 current_tb = tb_find_pc(pc);
945 #endif
946 while (tb != NULL) {
947 n = (long)tb & 3;
948 tb = (TranslationBlock *)((long)tb & ~3);
949 #ifdef TARGET_HAS_PRECISE_SMC
950 if (current_tb == tb &&
951 !(current_tb->cflags & CF_SINGLE_INSN)) {
952 /* If we are modifying the current TB, we must stop
953 its execution. We could be more precise by checking
954 that the modification is after the current PC, but it
955 would require a specialized function to partially
956 restore the CPU state */
958 current_tb_modified = 1;
959 cpu_restore_state(current_tb, env, pc, puc);
960 #if defined(TARGET_I386)
961 current_flags = env->hflags;
962 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
963 current_cs_base = (target_ulong)env->segs[R_CS].base;
964 current_pc = current_cs_base + env->eip;
965 #else
966 #error unsupported CPU
967 #endif
969 #endif /* TARGET_HAS_PRECISE_SMC */
970 tb_phys_invalidate(tb, addr);
971 tb = tb->page_next[n];
973 p->first_tb = NULL;
974 #ifdef TARGET_HAS_PRECISE_SMC
975 if (current_tb_modified) {
976 /* we generate a block containing just the instruction
977 modifying the memory. It will ensure that it cannot modify
978 itself */
979 env->current_tb = NULL;
980 tb_gen_code(env, current_pc, current_cs_base, current_flags,
981 CF_SINGLE_INSN);
982 cpu_resume_from_signal(env, puc);
984 #endif
986 #endif
988 /* add the tb in the target page and protect it if necessary */
989 static inline void tb_alloc_page(TranslationBlock *tb,
990 unsigned int n, target_ulong page_addr)
992 PageDesc *p;
993 TranslationBlock *last_first_tb;
995 tb->page_addr[n] = page_addr;
996 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
997 tb->page_next[n] = p->first_tb;
998 last_first_tb = p->first_tb;
999 p->first_tb = (TranslationBlock *)((long)tb | n);
1000 invalidate_page_bitmap(p);
1002 #if defined(TARGET_HAS_SMC) || 1
1004 #if defined(CONFIG_USER_ONLY)
1005 if (p->flags & PAGE_WRITE) {
1006 target_ulong addr;
1007 PageDesc *p2;
1008 int prot;
1010 /* force the host page as non writable (writes will have a
1011 page fault + mprotect overhead) */
1012 page_addr &= qemu_host_page_mask;
1013 prot = 0;
1014 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1015 addr += TARGET_PAGE_SIZE) {
1017 p2 = page_find (addr >> TARGET_PAGE_BITS);
1018 if (!p2)
1019 continue;
1020 prot |= p2->flags;
1021 p2->flags &= ~PAGE_WRITE;
1022 page_get_flags(addr);
1024 mprotect(g2h(page_addr), qemu_host_page_size,
1025 (prot & PAGE_BITS) & ~PAGE_WRITE);
1026 #ifdef DEBUG_TB_INVALIDATE
1027 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1028 page_addr);
1029 #endif
1031 #else
1032 /* if some code is already present, then the pages are already
1033 protected. So we handle the case where only the first TB is
1034 allocated in a physical page */
1035 if (!last_first_tb) {
1036 tlb_protect_code(page_addr);
1038 #endif
1040 #endif /* TARGET_HAS_SMC */
1043 /* Allocate a new translation block. Flush the translation buffer if
1044 too many translation blocks or too much generated code. */
1045 TranslationBlock *tb_alloc(target_ulong pc)
1047 TranslationBlock *tb;
1049 if (nb_tbs >= code_gen_max_blocks ||
1050 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1051 return NULL;
1052 tb = &tbs[nb_tbs++];
1053 tb->pc = pc;
1054 tb->cflags = 0;
1055 return tb;
1058 /* add a new TB and link it to the physical page tables. phys_page2 is
1059 (-1) to indicate that only one page contains the TB. */
1060 void tb_link_phys(TranslationBlock *tb,
1061 target_ulong phys_pc, target_ulong phys_page2)
1063 unsigned int h;
1064 TranslationBlock **ptb;
1066 /* Grab the mmap lock to stop another thread invalidating this TB
1067 before we are done. */
1068 mmap_lock();
1069 /* add in the physical hash table */
1070 h = tb_phys_hash_func(phys_pc);
1071 ptb = &tb_phys_hash[h];
1072 tb->phys_hash_next = *ptb;
1073 *ptb = tb;
1075 /* add in the page list */
1076 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1077 if (phys_page2 != -1)
1078 tb_alloc_page(tb, 1, phys_page2);
1079 else
1080 tb->page_addr[1] = -1;
1082 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1083 tb->jmp_next[0] = NULL;
1084 tb->jmp_next[1] = NULL;
1086 /* init original jump addresses */
1087 if (tb->tb_next_offset[0] != 0xffff)
1088 tb_reset_jump(tb, 0);
1089 if (tb->tb_next_offset[1] != 0xffff)
1090 tb_reset_jump(tb, 1);
1092 #ifdef DEBUG_TB_CHECK
1093 tb_page_check();
1094 #endif
1095 mmap_unlock();
1098 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1099 tb[1].tc_ptr. Return NULL if not found */
1100 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1102 int m_min, m_max, m;
1103 unsigned long v;
1104 TranslationBlock *tb;
1106 if (nb_tbs <= 0)
1107 return NULL;
1108 if (tc_ptr < (unsigned long)code_gen_buffer ||
1109 tc_ptr >= (unsigned long)code_gen_ptr)
1110 return NULL;
1111 /* binary search (cf Knuth) */
1112 m_min = 0;
1113 m_max = nb_tbs - 1;
1114 while (m_min <= m_max) {
1115 m = (m_min + m_max) >> 1;
1116 tb = &tbs[m];
1117 v = (unsigned long)tb->tc_ptr;
1118 if (v == tc_ptr)
1119 return tb;
1120 else if (tc_ptr < v) {
1121 m_max = m - 1;
1122 } else {
1123 m_min = m + 1;
1126 return &tbs[m_max];
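/* Illustrative sketch (not part of the original file): mapping a host PC
   that lies inside the code generation buffer back to its TranslationBlock,
   the same way the SMC handling above uses tb_find_pc(env->mem_write_pc). */
#if 0
static void example_find_tb(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* tb->tc_ptr <= host_pc and host_pc precedes the next TB's code */
    }
}
#endif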
1129 static void tb_reset_jump_recursive(TranslationBlock *tb);
1131 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1133 TranslationBlock *tb1, *tb_next, **ptb;
1134 unsigned int n1;
1136 tb1 = tb->jmp_next[n];
1137 if (tb1 != NULL) {
1138 /* find head of list */
1139 for(;;) {
1140 n1 = (long)tb1 & 3;
1141 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1142 if (n1 == 2)
1143 break;
1144 tb1 = tb1->jmp_next[n1];
1146 /* we are now sure that tb jumps to tb1 */
1147 tb_next = tb1;
1149 /* remove tb from the jmp_first list */
1150 ptb = &tb_next->jmp_first;
1151 for(;;) {
1152 tb1 = *ptb;
1153 n1 = (long)tb1 & 3;
1154 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1155 if (n1 == n && tb1 == tb)
1156 break;
1157 ptb = &tb1->jmp_next[n1];
1159 *ptb = tb->jmp_next[n];
1160 tb->jmp_next[n] = NULL;
1162 /* suppress the jump to next tb in generated code */
1163 tb_reset_jump(tb, n);
1165 /* suppress jumps in the tb on which we could have jumped */
1166 tb_reset_jump_recursive(tb_next);
1170 static void tb_reset_jump_recursive(TranslationBlock *tb)
1172 tb_reset_jump_recursive2(tb, 0);
1173 tb_reset_jump_recursive2(tb, 1);
1176 #if defined(TARGET_HAS_ICE)
1177 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1179 target_phys_addr_t addr;
1180 target_ulong pd;
1181 ram_addr_t ram_addr;
1182 PhysPageDesc *p;
1184 addr = cpu_get_phys_page_debug(env, pc);
1185 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1186 if (!p) {
1187 pd = IO_MEM_UNASSIGNED;
1188 } else {
1189 pd = p->phys_offset;
1191 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1192 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1194 #endif
1196 /* Add a watchpoint. */
1197 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1199 int i;
1201 for (i = 0; i < env->nb_watchpoints; i++) {
1202 if (addr == env->watchpoint[i].vaddr)
1203 return 0;
1205 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1206 return -1;
1208 i = env->nb_watchpoints++;
1209 env->watchpoint[i].vaddr = addr;
1210 tlb_flush_page(env, addr);
1211 /* FIXME: This flush is needed because of the hack to make memory ops
1212 terminate the TB. It can be removed once the proper IO trap and
1213 re-execute bits are in. */
1214 tb_flush(env);
1215 return i;
1218 /* Remove a watchpoint. */
1219 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1221 int i;
1223 for (i = 0; i < env->nb_watchpoints; i++) {
1224 if (addr == env->watchpoint[i].vaddr) {
1225 env->nb_watchpoints--;
1226 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1227 tlb_flush_page(env, addr);
1228 return 0;
1231 return -1;
1234 /* Remove all watchpoints. */
1235 void cpu_watchpoint_remove_all(CPUState *env) {
1236 int i;
1238 for (i = 0; i < env->nb_watchpoints; i++) {
1239 tlb_flush_page(env, env->watchpoint[i].vaddr);
1241 env->nb_watchpoints = 0;
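/* Illustrative sketch (not part of the original file): the watchpoint API
   above. cpu_watchpoint_insert() returns -1 when MAX_WATCHPOINTS is
   exceeded, 0 if the address is already watched, and otherwise the new slot
   index; 0x1000 is only an example guest virtual address. */
#if 0
static void example_watchpoint(CPUState *env)
{
    int slot = cpu_watchpoint_insert(env, 0x1000);
    if (slot >= 0)
        cpu_watchpoint_remove(env, 0x1000);
}
#endif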
1244 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1245 breakpoint is reached */
1246 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1248 #if defined(TARGET_HAS_ICE)
1249 int i;
1251 for(i = 0; i < env->nb_breakpoints; i++) {
1252 if (env->breakpoints[i] == pc)
1253 return 0;
1256 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1257 return -1;
1258 env->breakpoints[env->nb_breakpoints++] = pc;
1260 if (kvm_enabled())
1261 kvm_update_debugger(env);
1263 breakpoint_invalidate(env, pc);
1264 return 0;
1265 #else
1266 return -1;
1267 #endif
1270 /* remove all breakpoints */
1271 void cpu_breakpoint_remove_all(CPUState *env) {
1272 #if defined(TARGET_HAS_ICE)
1273 int i;
1274 for(i = 0; i < env->nb_breakpoints; i++) {
1275 breakpoint_invalidate(env, env->breakpoints[i]);
1277 env->nb_breakpoints = 0;
1278 #endif
1281 /* remove a breakpoint */
1282 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1284 #if defined(TARGET_HAS_ICE)
1285 int i;
1286 for(i = 0; i < env->nb_breakpoints; i++) {
1287 if (env->breakpoints[i] == pc)
1288 goto found;
1290 return -1;
1291 found:
1292 env->nb_breakpoints--;
1293 if (i < env->nb_breakpoints)
1294 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1296 if (kvm_enabled())
1297 kvm_update_debugger(env);
1299 breakpoint_invalidate(env, pc);
1300 return 0;
1301 #else
1302 return -1;
1303 #endif
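/* Illustrative sketch (not part of the original file): the breakpoint API
   above; the CPU loop returns EXCP_DEBUG when the breakpoint is reached.
   0x400000 is only an example guest PC. */
#if 0
static void example_breakpoint(CPUState *env)
{
    if (cpu_breakpoint_insert(env, 0x400000) == 0) {
        /* ... run until EXCP_DEBUG ... */
        cpu_breakpoint_remove(env, 0x400000);
    }
}
#endif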
1306 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1307 CPU loop after each instruction */
1308 void cpu_single_step(CPUState *env, int enabled)
1310 #if defined(TARGET_HAS_ICE)
1311 if (env->singlestep_enabled != enabled) {
1312 env->singlestep_enabled = enabled;
1313 /* must flush all the translated code to avoid inconsistencies */
1314 /* XXX: only flush what is necessary */
1315 tb_flush(env);
1317 if (kvm_enabled())
1318 kvm_update_debugger(env);
1319 #endif
1322 /* enable or disable low level logging */
1323 void cpu_set_log(int log_flags)
1325 loglevel = log_flags;
1326 if (loglevel && !logfile) {
1327 logfile = fopen(logfilename, log_append ? "a" : "w");
1328 if (!logfile) {
1329 perror(logfilename);
1330 _exit(1);
1332 #if !defined(CONFIG_SOFTMMU)
1333 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1335 static uint8_t logfile_buf[4096];
1336 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1338 #else
1339 setvbuf(logfile, NULL, _IOLBF, 0);
1340 #endif
1341 log_append = 1;
1343 if (!loglevel && logfile) {
1344 fclose(logfile);
1345 logfile = NULL;
1349 void cpu_set_log_filename(const char *filename)
1351 logfilename = strdup(filename);
1352 if (logfile) {
1353 fclose(logfile);
1354 logfile = NULL;
1356 cpu_set_log(loglevel);
1359 /* mask must never be zero, except for A20 change call */
1360 void cpu_interrupt(CPUState *env, int mask)
1362 TranslationBlock *tb;
1363 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1365 env->interrupt_request |= mask;
1366 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1367 kvm_update_interrupt_request(env);
1369 /* if the cpu is currently executing code, we must unlink it and
1370 all the potentially executing TB */
1371 tb = env->current_tb;
1372 if (tb && !testandset(&interrupt_lock)) {
1373 env->current_tb = NULL;
1374 tb_reset_jump_recursive(tb);
1375 resetlock(&interrupt_lock);
1379 void cpu_reset_interrupt(CPUState *env, int mask)
1381 env->interrupt_request &= ~mask;
1384 CPULogItem cpu_log_items[] = {
1385 { CPU_LOG_TB_OUT_ASM, "out_asm",
1386 "show generated host assembly code for each compiled TB" },
1387 { CPU_LOG_TB_IN_ASM, "in_asm",
1388 "show target assembly code for each compiled TB" },
1389 { CPU_LOG_TB_OP, "op",
1390 "show micro ops for each compiled TB" },
1391 { CPU_LOG_TB_OP_OPT, "op_opt",
1392 "show micro ops "
1393 #ifdef TARGET_I386
1394 "before eflags optimization and "
1395 #endif
1396 "after liveness analysis" },
1397 { CPU_LOG_INT, "int",
1398 "show interrupts/exceptions in short format" },
1399 { CPU_LOG_EXEC, "exec",
1400 "show trace before each executed TB (lots of logs)" },
1401 { CPU_LOG_TB_CPU, "cpu",
1402 "show CPU state before block translation" },
1403 #ifdef TARGET_I386
1404 { CPU_LOG_PCALL, "pcall",
1405 "show protected mode far calls/returns/exceptions" },
1406 #endif
1407 #ifdef DEBUG_IOPORT
1408 { CPU_LOG_IOPORT, "ioport",
1409 "show all i/o ports accesses" },
1410 #endif
1411 { 0, NULL, NULL },
1414 static int cmp1(const char *s1, int n, const char *s2)
1416 if (strlen(s2) != n)
1417 return 0;
1418 return memcmp(s1, s2, n) == 0;
1421 /* takes a comma-separated list of log masks. Returns 0 on error. */
1422 int cpu_str_to_log_mask(const char *str)
1424 CPULogItem *item;
1425 int mask;
1426 const char *p, *p1;
1428 p = str;
1429 mask = 0;
1430 for(;;) {
1431 p1 = strchr(p, ',');
1432 if (!p1)
1433 p1 = p + strlen(p);
1434 if(cmp1(p,p1-p,"all")) {
1435 for(item = cpu_log_items; item->mask != 0; item++) {
1436 mask |= item->mask;
1438 } else {
1439 for(item = cpu_log_items; item->mask != 0; item++) {
1440 if (cmp1(p, p1 - p, item->name))
1441 goto found;
1443 return 0;
1445 found:
1446 mask |= item->mask;
1447 if (*p1 != ',')
1448 break;
1449 p = p1 + 1;
1451 return mask;
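/* Illustrative sketch (not part of the original file): parsing a log
   specification with cpu_str_to_log_mask() and enabling it; the item names
   come from cpu_log_items above, and a return value of 0 means nothing was
   recognised. */
#if 0
static void example_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask)
        cpu_set_log(mask);
}
#endif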
1454 void cpu_abort(CPUState *env, const char *fmt, ...)
1456 va_list ap;
1457 va_list ap2;
1459 va_start(ap, fmt);
1460 va_copy(ap2, ap);
1461 fprintf(stderr, "qemu: fatal: ");
1462 vfprintf(stderr, fmt, ap);
1463 fprintf(stderr, "\n");
1464 #ifdef TARGET_I386
1465 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1466 #else
1467 cpu_dump_state(env, stderr, fprintf, 0);
1468 #endif
1469 if (logfile) {
1470 fprintf(logfile, "qemu: fatal: ");
1471 vfprintf(logfile, fmt, ap2);
1472 fprintf(logfile, "\n");
1473 #ifdef TARGET_I386
1474 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1475 #else
1476 cpu_dump_state(env, logfile, fprintf, 0);
1477 #endif
1478 fflush(logfile);
1479 fclose(logfile);
1481 va_end(ap2);
1482 va_end(ap);
1483 abort();
1486 CPUState *cpu_copy(CPUState *env)
1488 CPUState *new_env = cpu_init(env->cpu_model_str);
1489 /* preserve chaining and index */
1490 CPUState *next_cpu = new_env->next_cpu;
1491 int cpu_index = new_env->cpu_index;
1492 memcpy(new_env, env, sizeof(CPUState));
1493 new_env->next_cpu = next_cpu;
1494 new_env->cpu_index = cpu_index;
1495 return new_env;
1498 #if !defined(CONFIG_USER_ONLY)
1500 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1502 unsigned int i;
1504 /* Discard jump cache entries for any tb which might potentially
1505 overlap the flushed page. */
1506 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1507 memset (&env->tb_jmp_cache[i], 0,
1508 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1510 i = tb_jmp_cache_hash_page(addr);
1511 memset (&env->tb_jmp_cache[i], 0,
1512 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1515 /* NOTE: if flush_global is true, also flush global entries (not
1516 implemented yet) */
1517 void tlb_flush(CPUState *env, int flush_global)
1519 int i;
1521 #if defined(DEBUG_TLB)
1522 printf("tlb_flush:\n");
1523 #endif
1524 /* must reset current TB so that interrupts cannot modify the
1525 links while we are modifying them */
1526 env->current_tb = NULL;
1528 for(i = 0; i < CPU_TLB_SIZE; i++) {
1529 env->tlb_table[0][i].addr_read = -1;
1530 env->tlb_table[0][i].addr_write = -1;
1531 env->tlb_table[0][i].addr_code = -1;
1532 env->tlb_table[1][i].addr_read = -1;
1533 env->tlb_table[1][i].addr_write = -1;
1534 env->tlb_table[1][i].addr_code = -1;
1535 #if (NB_MMU_MODES >= 3)
1536 env->tlb_table[2][i].addr_read = -1;
1537 env->tlb_table[2][i].addr_write = -1;
1538 env->tlb_table[2][i].addr_code = -1;
1539 #if (NB_MMU_MODES == 4)
1540 env->tlb_table[3][i].addr_read = -1;
1541 env->tlb_table[3][i].addr_write = -1;
1542 env->tlb_table[3][i].addr_code = -1;
1543 #endif
1544 #endif
1547 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1549 #if !defined(CONFIG_SOFTMMU)
1550 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1551 #endif
1552 #ifdef USE_KQEMU
1553 if (env->kqemu_enabled) {
1554 kqemu_flush(env, flush_global);
1556 #endif
1557 tlb_flush_count++;
1560 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1562 if (addr == (tlb_entry->addr_read &
1563 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1564 addr == (tlb_entry->addr_write &
1565 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1566 addr == (tlb_entry->addr_code &
1567 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1568 tlb_entry->addr_read = -1;
1569 tlb_entry->addr_write = -1;
1570 tlb_entry->addr_code = -1;
1574 void tlb_flush_page(CPUState *env, target_ulong addr)
1576 int i;
1578 #if defined(DEBUG_TLB)
1579 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1580 #endif
1581 /* must reset current TB so that interrupts cannot modify the
1582 links while we are modifying them */
1583 env->current_tb = NULL;
1585 addr &= TARGET_PAGE_MASK;
1586 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1587 tlb_flush_entry(&env->tlb_table[0][i], addr);
1588 tlb_flush_entry(&env->tlb_table[1][i], addr);
1589 #if (NB_MMU_MODES >= 3)
1590 tlb_flush_entry(&env->tlb_table[2][i], addr);
1591 #if (NB_MMU_MODES == 4)
1592 tlb_flush_entry(&env->tlb_table[3][i], addr);
1593 #endif
1594 #endif
1596 tlb_flush_jmp_cache(env, addr);
1598 #if !defined(CONFIG_SOFTMMU)
1599 if (addr < MMAP_AREA_END)
1600 munmap((void *)addr, TARGET_PAGE_SIZE);
1601 #endif
1602 #ifdef USE_KQEMU
1603 if (env->kqemu_enabled) {
1604 kqemu_flush_page(env, addr);
1606 #endif
1609 /* update the TLBs so that writes to code in the virtual page 'addr'
1610 can be detected */
1611 static void tlb_protect_code(ram_addr_t ram_addr)
1613 cpu_physical_memory_reset_dirty(ram_addr,
1614 ram_addr + TARGET_PAGE_SIZE,
1615 CODE_DIRTY_FLAG);
1618 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1619 tested for self modifying code */
1620 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1621 target_ulong vaddr)
1623 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1626 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1627 unsigned long start, unsigned long length)
1629 unsigned long addr;
1630 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1631 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1632 if ((addr - start) < length) {
1633 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1638 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1639 int dirty_flags)
1641 CPUState *env;
1642 unsigned long length, start1;
1643 int i, mask, len;
1644 uint8_t *p;
1646 start &= TARGET_PAGE_MASK;
1647 end = TARGET_PAGE_ALIGN(end);
1649 length = end - start;
1650 if (length == 0)
1651 return;
1652 len = length >> TARGET_PAGE_BITS;
1653 #ifdef USE_KQEMU
1654 /* XXX: should not depend on cpu context */
1655 env = first_cpu;
1656 if (env->kqemu_enabled) {
1657 ram_addr_t addr;
1658 addr = start;
1659 for(i = 0; i < len; i++) {
1660 kqemu_set_notdirty(env, addr);
1661 addr += TARGET_PAGE_SIZE;
1664 #endif
1665 mask = ~dirty_flags;
1666 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1667 for(i = 0; i < len; i++)
1668 p[i] &= mask;
1670 /* we modify the TLB cache so that the dirty bit will be set again
1671 when accessing the range */
1672 start1 = start + (unsigned long)phys_ram_base;
1673 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1674 for(i = 0; i < CPU_TLB_SIZE; i++)
1675 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1676 for(i = 0; i < CPU_TLB_SIZE; i++)
1677 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1678 #if (NB_MMU_MODES >= 3)
1679 for(i = 0; i < CPU_TLB_SIZE; i++)
1680 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1681 #if (NB_MMU_MODES == 4)
1682 for(i = 0; i < CPU_TLB_SIZE; i++)
1683 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1684 #endif
1685 #endif
1688 #if !defined(CONFIG_SOFTMMU)
1689 /* XXX: this is expensive */
1691 VirtPageDesc *p;
1692 int j;
1693 target_ulong addr;
1695 for(i = 0; i < L1_SIZE; i++) {
1696 p = l1_virt_map[i];
1697 if (p) {
1698 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1699 for(j = 0; j < L2_SIZE; j++) {
1700 if (p->valid_tag == virt_valid_tag &&
1701 p->phys_addr >= start && p->phys_addr < end &&
1702 (p->prot & PROT_WRITE)) {
1703 if (addr < MMAP_AREA_END) {
1704 mprotect((void *)addr, TARGET_PAGE_SIZE,
1705 p->prot & ~PROT_WRITE);
1708 addr += TARGET_PAGE_SIZE;
1709 p++;
1714 #endif
1717 int cpu_physical_memory_set_dirty_tracking(int enable)
1719 int r=0;
1721 if (kvm_enabled())
1722 r = kvm_physical_memory_set_dirty_tracking(enable);
1723 in_migration = enable;
1724 return r;
1727 int cpu_physical_memory_get_dirty_tracking(void)
1729 return in_migration;
1732 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1734 ram_addr_t ram_addr;
1736 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1737 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1738 tlb_entry->addend - (unsigned long)phys_ram_base;
1739 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1740 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1745 /* update the TLB according to the current state of the dirty bits */
1746 void cpu_tlb_update_dirty(CPUState *env)
1748 int i;
1749 for(i = 0; i < CPU_TLB_SIZE; i++)
1750 tlb_update_dirty(&env->tlb_table[0][i]);
1751 for(i = 0; i < CPU_TLB_SIZE; i++)
1752 tlb_update_dirty(&env->tlb_table[1][i]);
1753 #if (NB_MMU_MODES >= 3)
1754 for(i = 0; i < CPU_TLB_SIZE; i++)
1755 tlb_update_dirty(&env->tlb_table[2][i]);
1756 #if (NB_MMU_MODES == 4)
1757 for(i = 0; i < CPU_TLB_SIZE; i++)
1758 tlb_update_dirty(&env->tlb_table[3][i]);
1759 #endif
1760 #endif
1763 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1764 unsigned long start)
1766 unsigned long addr;
1767 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1768 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1769 if (addr == start) {
1770 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1775 /* update the TLB corresponding to virtual page vaddr and phys addr
1776 addr so that it is no longer dirty */
1777 static inline void tlb_set_dirty(CPUState *env,
1778 unsigned long addr, target_ulong vaddr)
1780 int i;
1782 addr &= TARGET_PAGE_MASK;
1783 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1784 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1785 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1786 #if (NB_MMU_MODES >= 3)
1787 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1788 #if (NB_MMU_MODES == 4)
1789 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1790 #endif
1791 #endif
1794 /* add a new TLB entry. At most one entry for a given virtual address
1795 is permitted. Return 0 if OK or 2 if the page could not be mapped
1796 (can only happen in non SOFTMMU mode for I/O pages or pages
1797 conflicting with the host address space). */
1798 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1799 target_phys_addr_t paddr, int prot,
1800 int mmu_idx, int is_softmmu)
1802 PhysPageDesc *p;
1803 unsigned long pd;
1804 unsigned int index;
1805 target_ulong address;
1806 target_phys_addr_t addend;
1807 int ret;
1808 CPUTLBEntry *te;
1809 int i;
1811 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1812 if (!p) {
1813 pd = IO_MEM_UNASSIGNED;
1814 } else {
1815 pd = p->phys_offset;
1817 #if defined(DEBUG_TLB)
1818 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1819 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1820 #endif
1822 ret = 0;
1823 #if !defined(CONFIG_SOFTMMU)
1824 if (is_softmmu)
1825 #endif
1827 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1828 /* IO memory case */
1829 address = vaddr | pd;
1830 addend = paddr;
1831 } else {
1832 /* standard memory */
1833 address = vaddr;
1834 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1837 /* Make accesses to pages with watchpoints go via the
1838 watchpoint trap routines. */
1839 for (i = 0; i < env->nb_watchpoints; i++) {
1840 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1841 if (address & ~TARGET_PAGE_MASK) {
1842 env->watchpoint[i].addend = 0;
1843 address = vaddr | io_mem_watch;
1844 } else {
1845 env->watchpoint[i].addend = pd - paddr +
1846 (unsigned long) phys_ram_base;
1847 /* TODO: Figure out how to make read watchpoints coexist
1848 with code. */
1849 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1854 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1855 addend -= vaddr;
1856 te = &env->tlb_table[mmu_idx][index];
1857 te->addend = addend;
1858 if (prot & PAGE_READ) {
1859 te->addr_read = address;
1860 } else {
1861 te->addr_read = -1;
1864 if (prot & PAGE_EXEC) {
1865 te->addr_code = address;
1866 } else {
1867 te->addr_code = -1;
1869 if (prot & PAGE_WRITE) {
1870 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1871 (pd & IO_MEM_ROMD)) {
1872 /* write access calls the I/O callback */
1873 te->addr_write = vaddr |
1874 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1875 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1876 !cpu_physical_memory_is_dirty(pd)) {
1877 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1878 } else {
1879 te->addr_write = address;
1881 } else {
1882 te->addr_write = -1;
1885 #if !defined(CONFIG_SOFTMMU)
1886 else {
1887 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1888 /* IO access: no mapping is done as it will be handled by the
1889 soft MMU */
1890 if (!(env->hflags & HF_SOFTMMU_MASK))
1891 ret = 2;
1892 } else {
1893 void *map_addr;
1895 if (vaddr >= MMAP_AREA_END) {
1896 ret = 2;
1897 } else {
1898 if (prot & PROT_WRITE) {
1899 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1900 #if defined(TARGET_HAS_SMC) || 1
1901 first_tb ||
1902 #endif
1903 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1904 !cpu_physical_memory_is_dirty(pd))) {
1905 /* ROM: we act as if code were inside */
1906 /* if code is present, we only map as read only and save the
1907 original mapping */
1908 VirtPageDesc *vp;
1910 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1911 vp->phys_addr = pd;
1912 vp->prot = prot;
1913 vp->valid_tag = virt_valid_tag;
1914 prot &= ~PAGE_WRITE;
1917 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1918 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1919 if (map_addr == MAP_FAILED) {
1920 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1921 paddr, vaddr);
1926 #endif
1927 return ret;
1930 /* called from signal handler: invalidate the code and unprotect the
1931    page. Return TRUE if the fault was successfully handled. */
1932 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1934 #if !defined(CONFIG_SOFTMMU)
1935 VirtPageDesc *vp;
1937 #if defined(DEBUG_TLB)
1938 printf("page_unprotect: addr=0x%08x\n", addr);
1939 #endif
1940 addr &= TARGET_PAGE_MASK;
1942 /* if it is not mapped, no need to worry here */
1943 if (addr >= MMAP_AREA_END)
1944 return 0;
1945 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1946 if (!vp)
1947 return 0;
1948 /* NOTE: in this case, validate_tag is _not_ tested as it
1949 validates only the code TLB */
1950 if (vp->valid_tag != virt_valid_tag)
1951 return 0;
1952 if (!(vp->prot & PAGE_WRITE))
1953 return 0;
1954 #if defined(DEBUG_TLB)
1955 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1956 addr, vp->phys_addr, vp->prot);
1957 #endif
1958 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1959 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1960 (unsigned long)addr, vp->prot);
1961 /* set the dirty bit */
1962 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1963 /* flush the code inside */
1964 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1965 return 1;
1966 #else
1967 return 0;
1968 #endif
1971 #else
1973 void tlb_flush(CPUState *env, int flush_global)
1977 void tlb_flush_page(CPUState *env, target_ulong addr)
1981 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1982 target_phys_addr_t paddr, int prot,
1983 int mmu_idx, int is_softmmu)
1985 return 0;
1988 /* dump memory mappings */
1989 void page_dump(FILE *f)
1991 unsigned long start, end;
1992 int i, j, prot, prot1;
1993 PageDesc *p;
1995 fprintf(f, "%-8s %-8s %-8s %s\n",
1996 "start", "end", "size", "prot");
1997 start = -1;
1998 end = -1;
1999 prot = 0;
2000 for(i = 0; i <= L1_SIZE; i++) {
2001 if (i < L1_SIZE)
2002 p = l1_map[i];
2003 else
2004 p = NULL;
2005 for(j = 0;j < L2_SIZE; j++) {
2006 if (!p)
2007 prot1 = 0;
2008 else
2009 prot1 = p[j].flags;
2010 if (prot1 != prot) {
2011 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2012 if (start != -1) {
2013 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2014 start, end, end - start,
2015 prot & PAGE_READ ? 'r' : '-',
2016 prot & PAGE_WRITE ? 'w' : '-',
2017 prot & PAGE_EXEC ? 'x' : '-');
2019 if (prot1 != 0)
2020 start = end;
2021 else
2022 start = -1;
2023 prot = prot1;
2025 if (!p)
2026 break;
2031 int page_get_flags(target_ulong address)
2033 PageDesc *p;
2035 p = page_find(address >> TARGET_PAGE_BITS);
2036 if (!p)
2037 return 0;
2038 return p->flags;
2041 /* modify the flags of a page and invalidate the code if
2042    necessary. The flag PAGE_WRITE_ORG is positioned automatically
2043 depending on PAGE_WRITE */
2044 void page_set_flags(target_ulong start, target_ulong end, int flags)
2046 PageDesc *p;
2047 target_ulong addr;
2049 /* mmap_lock should already be held. */
2050 start = start & TARGET_PAGE_MASK;
2051 end = TARGET_PAGE_ALIGN(end);
2052 if (flags & PAGE_WRITE)
2053 flags |= PAGE_WRITE_ORG;
2054 spin_lock(&tb_lock);
2055 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2056 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2057 /* if the write protection is set, then we invalidate the code
2058 inside */
2059 if (!(p->flags & PAGE_WRITE) &&
2060 (flags & PAGE_WRITE) &&
2061 p->first_tb) {
2062 tb_invalidate_phys_page(addr, 0, NULL);
2064 p->flags = flags;
2066 spin_unlock(&tb_lock);
2069 int page_check_range(target_ulong start, target_ulong len, int flags)
2071 PageDesc *p;
2072 target_ulong end;
2073 target_ulong addr;
2075 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2076 start = start & TARGET_PAGE_MASK;
2078 if( end < start )
2079 /* we've wrapped around */
2080 return -1;
2081 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2082 p = page_find(addr >> TARGET_PAGE_BITS);
2083 if( !p )
2084 return -1;
2085 if( !(p->flags & PAGE_VALID) )
2086 return -1;
2088 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2089 return -1;
2090 if (flags & PAGE_WRITE) {
2091 if (!(p->flags & PAGE_WRITE_ORG))
2092 return -1;
2093 /* unprotect the page if it was put read-only because it
2094 contains translated code */
2095 if (!(p->flags & PAGE_WRITE)) {
2096 if (!page_unprotect(addr, 0, NULL))
2097 return -1;
2099 return 0;
2102 return 0;
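/* Illustrative usage sketch (user-mode emulation): syscall emulation
   validates guest buffers with this check before touching them; the
   guest_addr/len names and the error path below are hypothetical.
       if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) != 0)
           return -TARGET_EFAULT;
   A return value of 0 means every page in the range is valid and carries
   the requested PAGE_READ/PAGE_WRITE flags. */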
2105 /* called from signal handler: invalidate the code and unprotect the
2106 page. Return TRUE if the fault was successfully handled. */
2107 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2109 unsigned int page_index, prot, pindex;
2110 PageDesc *p, *p1;
2111 target_ulong host_start, host_end, addr;
2113 /* Technically this isn't safe inside a signal handler. However we
2114 know this only ever happens in a synchronous SEGV handler, so in
2115 practice it seems to be ok. */
2116 mmap_lock();
2118 host_start = address & qemu_host_page_mask;
2119 page_index = host_start >> TARGET_PAGE_BITS;
2120 p1 = page_find(page_index);
2121 if (!p1) {
2122 mmap_unlock();
2123 return 0;
2125 host_end = host_start + qemu_host_page_size;
2126 p = p1;
2127 prot = 0;
2128 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2129 prot |= p->flags;
2130 p++;
2132 /* if the page was really writable, then we change its
2133 protection back to writable */
2134 if (prot & PAGE_WRITE_ORG) {
2135 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2136 if (!(p1[pindex].flags & PAGE_WRITE)) {
2137 mprotect((void *)g2h(host_start), qemu_host_page_size,
2138 (prot & PAGE_BITS) | PAGE_WRITE);
2139 p1[pindex].flags |= PAGE_WRITE;
2140 /* and since the content will be modified, we must invalidate
2141 the corresponding translated code. */
2142 tb_invalidate_phys_page(address, pc, puc);
2143 #ifdef DEBUG_TB_CHECK
2144 tb_invalidate_check(address);
2145 #endif
2146 mmap_unlock();
2147 return 1;
2150 mmap_unlock();
2151 return 0;
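/* Illustrative note: in user-mode emulation the host SIGSEGV handler calls
   page_unprotect() for faults on pages that were write-protected because
   they contain translated code; a return value of 1 means the fault has
   been handled (code invalidated, protection restored) and the faulting
   instruction can simply be restarted. */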
2154 static inline void tlb_set_dirty(CPUState *env,
2155 unsigned long addr, target_ulong vaddr)
2158 #endif /* defined(CONFIG_USER_ONLY) */
2160 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2161 ram_addr_t memory);
2162 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2163 ram_addr_t orig_memory);
2164 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2165 need_subpage) \
2166 do { \
2167 if (addr > start_addr) \
2168 start_addr2 = 0; \
2169 else { \
2170 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2171 if (start_addr2 > 0) \
2172 need_subpage = 1; \
2175 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2176 end_addr2 = TARGET_PAGE_SIZE - 1; \
2177 else { \
2178 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2179 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2180 need_subpage = 1; \
2182 } while (0)
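/* Worked example (illustrative, assuming TARGET_PAGE_SIZE == 0x1000):
   registering start_addr = 0x10100 with orig_size = 0x200 enters the loop
   below with addr == start_addr, so the macro computes
   start_addr2 = 0x10100 & 0xfff = 0x100 and, because only 0x200 bytes of
   the range fall inside this page, end_addr2 = 0x102ff & 0xfff = 0x2ff.
   Both lie strictly inside the page, need_subpage is set, and only the
   sub-range [0x100, 0x2ff] of the page at 0x10000 is remapped through a
   subpage. */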
2184 /* register physical memory. 'size' must be a multiple of the target
2185 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2186 io memory page */
2187 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2188 ram_addr_t size,
2189 ram_addr_t phys_offset)
2191 target_phys_addr_t addr, end_addr;
2192 PhysPageDesc *p;
2193 CPUState *env;
2194 ram_addr_t orig_size = size;
2195 void *subpage;
2197 #ifdef USE_KQEMU
2198 /* XXX: should not depend on cpu context */
2199 env = first_cpu;
2200 if (env->kqemu_enabled) {
2201 kqemu_set_phys_mem(start_addr, size, phys_offset);
2203 #endif
2204 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2205 end_addr = start_addr + (target_phys_addr_t)size;
2206 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2207 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2208 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2209 ram_addr_t orig_memory = p->phys_offset;
2210 target_phys_addr_t start_addr2, end_addr2;
2211 int need_subpage = 0;
2213 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2214 need_subpage);
2215 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2216 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2217 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2218 &p->phys_offset, orig_memory);
2219 } else {
2220 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2221 >> IO_MEM_SHIFT];
2223 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2224 } else {
2225 p->phys_offset = phys_offset;
2226 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2227 (phys_offset & IO_MEM_ROMD))
2228 phys_offset += TARGET_PAGE_SIZE;
2230 } else {
2231 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2232 p->phys_offset = phys_offset;
2233 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2234 (phys_offset & IO_MEM_ROMD))
2235 phys_offset += TARGET_PAGE_SIZE;
2236 else {
2237 target_phys_addr_t start_addr2, end_addr2;
2238 int need_subpage = 0;
2240 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2241 end_addr2, need_subpage);
2243 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2244 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2245 &p->phys_offset, IO_MEM_UNASSIGNED);
2246 subpage_register(subpage, start_addr2, end_addr2,
2247 phys_offset);
2253 /* since each CPU stores ram addresses in its TLB cache, we must
2254 reset the modified entries */
2255 /* XXX: slow ! */
2256 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2257 tlb_flush(env, 1);
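/* Usage sketch (illustrative only; the function, addresses and my_dev_*
   tables below are hypothetical).  Board code typically backs guest RAM
   with an offset from qemu_ram_alloc() and maps MMIO with the index
   returned by cpu_register_io_memory(), whose non-zero low bits make it
   an io memory page: */
#if 0
static void example_board_init(void)
{
    /* back the first 64 KiB of guest physical memory with RAM */
    ram_addr_t ram_off = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0x00000000, 0x10000, ram_off | IO_MEM_RAM);

    /* map one page of device registers */
    int iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write, NULL);
    cpu_register_physical_memory(0xd0000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif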
2261 /* XXX: temporary until new memory mapping API */
2262 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2264 PhysPageDesc *p;
2266 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2267 if (!p)
2268 return IO_MEM_UNASSIGNED;
2269 return p->phys_offset;
2272 /* XXX: better than nothing */
2273 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2275 ram_addr_t addr;
2276 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2277 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2278 (uint64_t)size, (uint64_t)phys_ram_size);
2279 abort();
2281 addr = phys_ram_alloc_offset;
2282 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2283 return addr;
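/* Illustrative note: this is a simple bump allocator over the preallocated
   phys_ram_base area; callers keep the returned ram_addr_t and usually pass
   it on to cpu_register_physical_memory().  qemu_ram_free() below is a
   no-op in this scheme, so allocations are never reclaimed. */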
2286 void qemu_ram_free(ram_addr_t addr)
2290 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2292 #ifdef DEBUG_UNASSIGNED
2293 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2294 #endif
2295 #ifdef TARGET_SPARC
2296 do_unassigned_access(addr, 0, 0, 0);
2297 #elif TARGET_CRIS
2298 do_unassigned_access(addr, 0, 0, 0);
2299 #endif
2300 return 0;
2303 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2305 #ifdef DEBUG_UNASSIGNED
2306 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2307 #endif
2308 #ifdef TARGET_SPARC
2309 do_unassigned_access(addr, 1, 0, 0);
2310 #elif TARGET_CRIS
2311 do_unassigned_access(addr, 1, 0, 0);
2312 #endif
2315 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2316 unassigned_mem_readb,
2317 unassigned_mem_readb,
2318 unassigned_mem_readb,
2321 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2322 unassigned_mem_writeb,
2323 unassigned_mem_writeb,
2324 unassigned_mem_writeb,
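/* The "notdirty" handlers below implement write tracking for RAM pages that
   contain translated code: the TLB entry for such a page is redirected to
   this slow path, so each store first invalidates any TBs on the page, then
   performs the write and updates phys_ram_dirty.  Once the page's dirty
   byte is back to 0xff, tlb_set_dirty() restores the direct RAM mapping. */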
2327 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2329 unsigned long ram_addr;
2330 int dirty_flags;
2331 ram_addr = addr - (unsigned long)phys_ram_base;
2332 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2333 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2334 #if !defined(CONFIG_USER_ONLY)
2335 tb_invalidate_phys_page_fast(ram_addr, 1);
2336 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2337 #endif
2339 stb_p((uint8_t *)(long)addr, val);
2340 #ifdef USE_KQEMU
2341 if (cpu_single_env->kqemu_enabled &&
2342 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2343 kqemu_modify_page(cpu_single_env, ram_addr);
2344 #endif
2345 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2346 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2347 /* we remove the notdirty callback only if the code has been
2348 flushed */
2349 if (dirty_flags == 0xff)
2350 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2353 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2355 unsigned long ram_addr;
2356 int dirty_flags;
2357 ram_addr = addr - (unsigned long)phys_ram_base;
2358 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2359 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2360 #if !defined(CONFIG_USER_ONLY)
2361 tb_invalidate_phys_page_fast(ram_addr, 2);
2362 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2363 #endif
2365 stw_p((uint8_t *)(long)addr, val);
2366 #ifdef USE_KQEMU
2367 if (cpu_single_env->kqemu_enabled &&
2368 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2369 kqemu_modify_page(cpu_single_env, ram_addr);
2370 #endif
2371 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2372 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2373 /* we remove the notdirty callback only if the code has been
2374 flushed */
2375 if (dirty_flags == 0xff)
2376 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2379 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2381 unsigned long ram_addr;
2382 int dirty_flags;
2383 ram_addr = addr - (unsigned long)phys_ram_base;
2384 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2385 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2386 #if !defined(CONFIG_USER_ONLY)
2387 tb_invalidate_phys_page_fast(ram_addr, 4);
2388 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2389 #endif
2391 stl_p((uint8_t *)(long)addr, val);
2392 #ifdef USE_KQEMU
2393 if (cpu_single_env->kqemu_enabled &&
2394 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2395 kqemu_modify_page(cpu_single_env, ram_addr);
2396 #endif
2397 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2398 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2399 /* we remove the notdirty callback only if the code has been
2400 flushed */
2401 if (dirty_flags == 0xff)
2402 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2405 static CPUReadMemoryFunc *error_mem_read[3] = {
2406 NULL, /* never used */
2407 NULL, /* never used */
2408 NULL, /* never used */
2411 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2412 notdirty_mem_writeb,
2413 notdirty_mem_writew,
2414 notdirty_mem_writel,
2417 #if defined(CONFIG_SOFTMMU)
2418 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2419 so these check for a hit then pass through to the normal out-of-line
2420 phys routines. */
2421 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2423 return ldub_phys(addr);
2426 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2428 return lduw_phys(addr);
2431 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2433 return ldl_phys(addr);
2436 /* Generate a debug exception if a watchpoint has been hit.
2437 Returns the real physical address of the access. addr will be a host
2438 address in case of a RAM location. */
2439 static target_ulong check_watchpoint(target_phys_addr_t addr)
2441 CPUState *env = cpu_single_env;
2442 target_ulong watch;
2443 target_ulong retaddr;
2444 int i;
2446 retaddr = addr;
2447 for (i = 0; i < env->nb_watchpoints; i++) {
2448 watch = env->watchpoint[i].vaddr;
2449 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2450 retaddr = addr - env->watchpoint[i].addend;
2451 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2452 cpu_single_env->watchpoint_hit = i + 1;
2453 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2454 break;
2458 return retaddr;
2461 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2462 uint32_t val)
2464 addr = check_watchpoint(addr);
2465 stb_phys(addr, val);
2468 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2469 uint32_t val)
2471 addr = check_watchpoint(addr);
2472 stw_phys(addr, val);
2475 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2476 uint32_t val)
2478 addr = check_watchpoint(addr);
2479 stl_phys(addr, val);
2482 static CPUReadMemoryFunc *watch_mem_read[3] = {
2483 watch_mem_readb,
2484 watch_mem_readw,
2485 watch_mem_readl,
2488 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2489 watch_mem_writeb,
2490 watch_mem_writew,
2491 watch_mem_writel,
2493 #endif
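/* Subpages handle the case where one target page is shared by several
   memory regions (for example a small MMIO block that does not start or end
   on a page boundary).  subpage_init() builds a subpage_t that records, per
   sub-page offset and access size, which registered io handler to use;
   subpage_readlen()/subpage_writelen() then perform that dispatch. */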
2495 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2496 unsigned int len)
2498 uint32_t ret;
2499 unsigned int idx;
2501 idx = SUBPAGE_IDX(addr - mmio->base);
2502 #if defined(DEBUG_SUBPAGE)
2503 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2504 mmio, len, addr, idx);
2505 #endif
2506 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2508 return ret;
2511 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2512 uint32_t value, unsigned int len)
2514 unsigned int idx;
2516 idx = SUBPAGE_IDX(addr - mmio->base);
2517 #if defined(DEBUG_SUBPAGE)
2518 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2519 mmio, len, addr, idx, value);
2520 #endif
2521 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2524 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2526 #if defined(DEBUG_SUBPAGE)
2527 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2528 #endif
2530 return subpage_readlen(opaque, addr, 0);
2533 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2534 uint32_t value)
2536 #if defined(DEBUG_SUBPAGE)
2537 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2538 #endif
2539 subpage_writelen(opaque, addr, value, 0);
2542 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2544 #if defined(DEBUG_SUBPAGE)
2545 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2546 #endif
2548 return subpage_readlen(opaque, addr, 1);
2551 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2552 uint32_t value)
2554 #if defined(DEBUG_SUBPAGE)
2555 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2556 #endif
2557 subpage_writelen(opaque, addr, value, 1);
2560 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2562 #if defined(DEBUG_SUBPAGE)
2563 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2564 #endif
2566 return subpage_readlen(opaque, addr, 2);
2569 static void subpage_writel (void *opaque,
2570 target_phys_addr_t addr, uint32_t value)
2572 #if defined(DEBUG_SUBPAGE)
2573 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2574 #endif
2575 subpage_writelen(opaque, addr, value, 2);
2578 static CPUReadMemoryFunc *subpage_read[] = {
2579 &subpage_readb,
2580 &subpage_readw,
2581 &subpage_readl,
2584 static CPUWriteMemoryFunc *subpage_write[] = {
2585 &subpage_writeb,
2586 &subpage_writew,
2587 &subpage_writel,
2590 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2591 ram_addr_t memory)
2593 int idx, eidx;
2594 unsigned int i;
2596 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2597 return -1;
2598 idx = SUBPAGE_IDX(start);
2599 eidx = SUBPAGE_IDX(end);
2600 #if defined(DEBUG_SUBPAGE)
2601 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2602 mmio, start, end, idx, eidx, memory);
2603 #endif
2604 memory >>= IO_MEM_SHIFT;
2605 for (; idx <= eidx; idx++) {
2606 for (i = 0; i < 4; i++) {
2607 if (io_mem_read[memory][i]) {
2608 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2609 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2611 if (io_mem_write[memory][i]) {
2612 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2613 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2618 return 0;
2621 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2622 ram_addr_t orig_memory)
2624 subpage_t *mmio;
2625 int subpage_memory;
2627 mmio = qemu_mallocz(sizeof(subpage_t));
2628 if (mmio != NULL) {
2629 mmio->base = base;
2630 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2631 #if defined(DEBUG_SUBPAGE)
2632 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2633 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2634 #endif
2635 *phys = subpage_memory | IO_MEM_SUBPAGE;
2636 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2639 return mmio;
2642 static int get_free_io_mem_idx(void)
2644 int i;
2646 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2647 if (!io_mem_used[i]) {
2648 io_mem_used[i] = 1;
2649 return i;
2652 return -1;
2655 static void io_mem_init(void)
2657 int i;
2659 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2660 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2661 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2662 for (i=0; i<5; i++)
2663 io_mem_used[i] = 1;
2665 #if defined(CONFIG_SOFTMMU)
2666 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2667 watch_mem_write, NULL);
2668 #endif
2669 /* alloc dirty bits array */
2670 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2671 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2674 /* mem_read and mem_write are arrays of functions containing the
2675 function to access byte (index 0), word (index 1) and dword (index
2676 2). Functions can be omitted with a NULL function pointer. The
2677 registered functions may be modified dynamically later.
2678 If io_index is positive, the corresponding io zone is
2679 modified. If it is zero or negative, a new io zone is allocated. The return
2680 value can be used with cpu_register_physical_memory(). (-1) is
2681 returned if error. */
2682 int cpu_register_io_memory(int io_index,
2683 CPUReadMemoryFunc **mem_read,
2684 CPUWriteMemoryFunc **mem_write,
2685 void *opaque)
2687 int i, subwidth = 0;
2689 if (io_index <= 0) {
2690 io_index = get_free_io_mem_idx();
2691 if (io_index == -1)
2692 return io_index;
2693 } else {
2694 if (io_index >= IO_MEM_NB_ENTRIES)
2695 return -1;
2698 for(i = 0;i < 3; i++) {
2699 if (!mem_read[i] || !mem_write[i])
2700 subwidth = IO_MEM_SUBWIDTH;
2701 io_mem_read[io_index][i] = mem_read[i];
2702 io_mem_write[io_index][i] = mem_write[i];
2704 io_mem_opaque[io_index] = opaque;
2705 return (io_index << IO_MEM_SHIFT) | subwidth;
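/* Usage sketch (illustrative; the my_dev_* callbacks and state pointer are
   hypothetical).  A device registers byte/word/long handlers and then maps
   the returned index over a range of guest physical addresses: */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base, void *state)
{
    int iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write, state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}
#endif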
2708 void cpu_unregister_io_memory(int io_table_address)
2710 int i;
2711 int io_index = io_table_address >> IO_MEM_SHIFT;
2713 for (i=0;i < 3; i++) {
2714 io_mem_read[io_index][i] = unassigned_mem_read[i];
2715 io_mem_write[io_index][i] = unassigned_mem_write[i];
2717 io_mem_opaque[io_index] = NULL;
2718 io_mem_used[io_index] = 0;
2721 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2723 return io_mem_write[io_index >> IO_MEM_SHIFT];
2726 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2728 return io_mem_read[io_index >> IO_MEM_SHIFT];
2731 /* physical memory access (slow version, mainly for debug) */
2732 #if defined(CONFIG_USER_ONLY)
2733 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2734 int len, int is_write)
2736 int l, flags;
2737 target_ulong page;
2738 void * p;
2740 while (len > 0) {
2741 page = addr & TARGET_PAGE_MASK;
2742 l = (page + TARGET_PAGE_SIZE) - addr;
2743 if (l > len)
2744 l = len;
2745 flags = page_get_flags(page);
2746 if (!(flags & PAGE_VALID))
2747 return;
2748 if (is_write) {
2749 if (!(flags & PAGE_WRITE))
2750 return;
2751 /* XXX: this code should not depend on lock_user */
2752 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2753 /* FIXME - should this return an error rather than just fail? */
2754 return;
2755 memcpy(p, buf, l);
2756 unlock_user(p, addr, l);
2757 } else {
2758 if (!(flags & PAGE_READ))
2759 return;
2760 /* XXX: this code should not depend on lock_user */
2761 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2762 /* FIXME - should this return an error rather than just fail? */
2763 return;
2764 memcpy(buf, p, l);
2765 unlock_user(p, addr, 0);
2767 len -= l;
2768 buf += l;
2769 addr += l;
2773 #else
2774 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2775 int len, int is_write)
2777 int l, io_index;
2778 uint8_t *ptr;
2779 uint32_t val;
2780 target_phys_addr_t page;
2781 unsigned long pd;
2782 PhysPageDesc *p;
2784 while (len > 0) {
2785 page = addr & TARGET_PAGE_MASK;
2786 l = (page + TARGET_PAGE_SIZE) - addr;
2787 if (l > len)
2788 l = len;
2789 p = phys_page_find(page >> TARGET_PAGE_BITS);
2790 if (!p) {
2791 pd = IO_MEM_UNASSIGNED;
2792 } else {
2793 pd = p->phys_offset;
2796 if (is_write) {
2797 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2798 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2799 /* XXX: could force cpu_single_env to NULL to avoid
2800 potential bugs */
2801 if (l >= 4 && ((addr & 3) == 0)) {
2802 /* 32 bit write access */
2803 val = ldl_p(buf);
2804 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2805 l = 4;
2806 } else if (l >= 2 && ((addr & 1) == 0)) {
2807 /* 16 bit write access */
2808 val = lduw_p(buf);
2809 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2810 l = 2;
2811 } else {
2812 /* 8 bit write access */
2813 val = ldub_p(buf);
2814 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2815 l = 1;
2817 } else {
2818 unsigned long addr1;
2819 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2820 /* RAM case */
2821 ptr = phys_ram_base + addr1;
2822 memcpy(ptr, buf, l);
2823 if (!cpu_physical_memory_is_dirty(addr1)) {
2824 /* invalidate code */
2825 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2826 /* set dirty bit */
2827 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2828 (0xff & ~CODE_DIRTY_FLAG);
2830 /* qemu doesn't execute guest code directly, but kvm does;
2831 therefore flush the instruction caches */
2832 if (kvm_enabled())
2833 flush_icache_range((unsigned long)ptr,
2834 ((unsigned long)ptr)+l);
2836 } else {
2837 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2838 !(pd & IO_MEM_ROMD)) {
2839 /* I/O case */
2840 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2841 if (l >= 4 && ((addr & 3) == 0)) {
2842 /* 32 bit read access */
2843 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2844 stl_p(buf, val);
2845 l = 4;
2846 } else if (l >= 2 && ((addr & 1) == 0)) {
2847 /* 16 bit read access */
2848 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2849 stw_p(buf, val);
2850 l = 2;
2851 } else {
2852 /* 8 bit read access */
2853 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2854 stb_p(buf, val);
2855 l = 1;
2857 } else {
2858 /* RAM case */
2859 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2860 (addr & ~TARGET_PAGE_MASK);
2861 memcpy(buf, ptr, l);
2864 len -= l;
2865 buf += l;
2866 addr += l;
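/* Example (illustrative): device emulation commonly copies guest-visible
   structures through this slow path, e.g. fetching a hypothetical
   descriptor from guest RAM and writing a status byte back:
       cpu_physical_memory_read(desc_paddr, (uint8_t *)&desc, sizeof(desc));
       cpu_physical_memory_write(status_paddr, &status, 1);
   cpu_physical_memory_read/write are thin wrappers around
   cpu_physical_memory_rw() with is_write set to 0 and 1 respectively. */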
2870 /* used for ROM loading: can write to RAM and ROM */
2871 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2872 const uint8_t *buf, int len)
2874 int l;
2875 uint8_t *ptr;
2876 target_phys_addr_t page;
2877 unsigned long pd;
2878 PhysPageDesc *p;
2880 while (len > 0) {
2881 page = addr & TARGET_PAGE_MASK;
2882 l = (page + TARGET_PAGE_SIZE) - addr;
2883 if (l > len)
2884 l = len;
2885 p = phys_page_find(page >> TARGET_PAGE_BITS);
2886 if (!p) {
2887 pd = IO_MEM_UNASSIGNED;
2888 } else {
2889 pd = p->phys_offset;
2892 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2893 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2894 !(pd & IO_MEM_ROMD)) {
2895 /* do nothing */
2896 } else {
2897 unsigned long addr1;
2898 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2899 /* ROM/RAM case */
2900 ptr = phys_ram_base + addr1;
2901 memcpy(ptr, buf, l);
2903 len -= l;
2904 buf += l;
2905 addr += l;
2910 /* warning: addr must be aligned */
2911 uint32_t ldl_phys(target_phys_addr_t addr)
2913 int io_index;
2914 uint8_t *ptr;
2915 uint32_t val;
2916 unsigned long pd;
2917 PhysPageDesc *p;
2919 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2920 if (!p) {
2921 pd = IO_MEM_UNASSIGNED;
2922 } else {
2923 pd = p->phys_offset;
2926 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2927 !(pd & IO_MEM_ROMD)) {
2928 /* I/O case */
2929 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2930 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2931 } else {
2932 /* RAM case */
2933 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2934 (addr & ~TARGET_PAGE_MASK);
2935 val = ldl_p(ptr);
2937 return val;
2940 /* warning: addr must be aligned */
2941 uint64_t ldq_phys(target_phys_addr_t addr)
2943 int io_index;
2944 uint8_t *ptr;
2945 uint64_t val;
2946 unsigned long pd;
2947 PhysPageDesc *p;
2949 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2950 if (!p) {
2951 pd = IO_MEM_UNASSIGNED;
2952 } else {
2953 pd = p->phys_offset;
2956 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2957 !(pd & IO_MEM_ROMD)) {
2958 /* I/O case */
2959 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2960 #ifdef TARGET_WORDS_BIGENDIAN
2961 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2962 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2963 #else
2964 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2965 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2966 #endif
2967 } else {
2968 /* RAM case */
2969 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2970 (addr & ~TARGET_PAGE_MASK);
2971 val = ldq_p(ptr);
2973 return val;
2976 /* XXX: optimize */
2977 uint32_t ldub_phys(target_phys_addr_t addr)
2979 uint8_t val;
2980 cpu_physical_memory_read(addr, &val, 1);
2981 return val;
2984 /* XXX: optimize */
2985 uint32_t lduw_phys(target_phys_addr_t addr)
2987 uint16_t val;
2988 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2989 return tswap16(val);
2992 #ifdef __GNUC__
2993 #define likely(x) __builtin_expect(!!(x), 1)
2994 #define unlikely(x) __builtin_expect(!!(x), 0)
2995 #else
2996 #define likely(x) x
2997 #define unlikely(x) x
2998 #endif
3000 /* warning: addr must be aligned. The ram page is not marked as dirty
3001 and the code inside is not invalidated. It is useful if the dirty
3002 bits are used to track modified PTEs */
3003 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3005 int io_index;
3006 uint8_t *ptr;
3007 unsigned long pd;
3008 PhysPageDesc *p;
3010 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3011 if (!p) {
3012 pd = IO_MEM_UNASSIGNED;
3013 } else {
3014 pd = p->phys_offset;
3017 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3018 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3019 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3020 } else {
3021 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3022 ptr = phys_ram_base + addr1;
3023 stl_p(ptr, val);
3025 if (unlikely(in_migration)) {
3026 if (!cpu_physical_memory_is_dirty(addr1)) {
3027 /* invalidate code */
3028 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3029 /* set dirty bit */
3030 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3031 (0xff & ~CODE_DIRTY_FLAG);
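/* Illustrative use: target MMU code can set accessed/dirty bits in a guest
   page table entry with stl_phys_notdirty() so that QEMU's own write does
   not mark the RAM page dirty, e.g. (x86-style sketch, pte/pte_addr
   hypothetical):
       pte |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pte_addr, pte);
   which keeps dirty-bit tracking of page-table pages meaningful. */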
3037 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3039 int io_index;
3040 uint8_t *ptr;
3041 unsigned long pd;
3042 PhysPageDesc *p;
3044 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3045 if (!p) {
3046 pd = IO_MEM_UNASSIGNED;
3047 } else {
3048 pd = p->phys_offset;
3051 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3052 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3053 #ifdef TARGET_WORDS_BIGENDIAN
3054 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3055 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3056 #else
3057 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3058 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3059 #endif
3060 } else {
3061 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3062 (addr & ~TARGET_PAGE_MASK);
3063 stq_p(ptr, val);
3067 /* warning: addr must be aligned */
3068 void stl_phys(target_phys_addr_t addr, uint32_t val)
3070 int io_index;
3071 uint8_t *ptr;
3072 unsigned long pd;
3073 PhysPageDesc *p;
3075 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3076 if (!p) {
3077 pd = IO_MEM_UNASSIGNED;
3078 } else {
3079 pd = p->phys_offset;
3082 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3083 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3084 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3085 } else {
3086 unsigned long addr1;
3087 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3088 /* RAM case */
3089 ptr = phys_ram_base + addr1;
3090 stl_p(ptr, val);
3091 if (!cpu_physical_memory_is_dirty(addr1)) {
3092 /* invalidate code */
3093 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3094 /* set dirty bit */
3095 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3096 (0xff & ~CODE_DIRTY_FLAG);
3101 /* XXX: optimize */
3102 void stb_phys(target_phys_addr_t addr, uint32_t val)
3104 uint8_t v = val;
3105 cpu_physical_memory_write(addr, &v, 1);
3108 /* XXX: optimize */
3109 void stw_phys(target_phys_addr_t addr, uint32_t val)
3111 uint16_t v = tswap16(val);
3112 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3115 /* XXX: optimize */
3116 void stq_phys(target_phys_addr_t addr, uint64_t val)
3118 val = tswap64(val);
3119 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3122 #endif
3124 /* virtual memory access for debug */
3125 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3126 uint8_t *buf, int len, int is_write)
3128 int l;
3129 target_phys_addr_t phys_addr;
3130 target_ulong page;
3132 while (len > 0) {
3133 page = addr & TARGET_PAGE_MASK;
3134 phys_addr = cpu_get_phys_page_debug(env, page);
3135 /* if no physical page mapped, return an error */
3136 if (phys_addr == -1)
3137 return -1;
3138 l = (page + TARGET_PAGE_SIZE) - addr;
3139 if (l > len)
3140 l = len;
3141 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3142 buf, l, is_write);
3143 len -= l;
3144 buf += l;
3145 addr += l;
3147 return 0;
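/* Example (illustrative): the gdb stub reads guest virtual memory through
   this helper, which translates page by page via cpu_get_phys_page_debug():
       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, pc_addr, buf, sizeof(buf), 0) == 0)
           inspect_bytes(buf);   (pc_addr and inspect_bytes are hypothetical)
   A return value of -1 means some page in the range had no mapping. */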
3150 void dump_exec_info(FILE *f,
3151 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3153 int i, target_code_size, max_target_code_size;
3154 int direct_jmp_count, direct_jmp2_count, cross_page;
3155 TranslationBlock *tb;
3157 target_code_size = 0;
3158 max_target_code_size = 0;
3159 cross_page = 0;
3160 direct_jmp_count = 0;
3161 direct_jmp2_count = 0;
3162 for(i = 0; i < nb_tbs; i++) {
3163 tb = &tbs[i];
3164 target_code_size += tb->size;
3165 if (tb->size > max_target_code_size)
3166 max_target_code_size = tb->size;
3167 if (tb->page_addr[1] != -1)
3168 cross_page++;
3169 if (tb->tb_next_offset[0] != 0xffff) {
3170 direct_jmp_count++;
3171 if (tb->tb_next_offset[1] != 0xffff) {
3172 direct_jmp2_count++;
3176 /* XXX: avoid using doubles ? */
3177 cpu_fprintf(f, "Translation buffer state:\n");
3178 cpu_fprintf(f, "gen code size %ld/%ld\n",
3179 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3180 cpu_fprintf(f, "TB count %d/%d\n",
3181 nb_tbs, code_gen_max_blocks);
3182 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3183 nb_tbs ? target_code_size / nb_tbs : 0,
3184 max_target_code_size);
3185 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3186 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3187 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3188 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3189 cross_page,
3190 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3191 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3192 direct_jmp_count,
3193 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3194 direct_jmp2_count,
3195 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3196 cpu_fprintf(f, "\nStatistics:\n");
3197 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3198 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3199 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3200 tcg_dump_info(f, cpu_fprintf);
3203 #if !defined(CONFIG_USER_ONLY)
3205 #define MMUSUFFIX _cmmu
3206 #define GETPC() NULL
3207 #define env cpu_single_env
3208 #define SOFTMMU_CODE_ACCESS
3210 #define SHIFT 0
3211 #include "softmmu_template.h"
3213 #define SHIFT 1
3214 #include "softmmu_template.h"
3216 #define SHIFT 2
3217 #include "softmmu_template.h"
3219 #define SHIFT 3
3220 #include "softmmu_template.h"
3222 #undef env
3224 #endif