libkvm: accept EAGAIN to restart kvm_run
[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #ifdef USE_KVM
38 #include "qemu-kvm.h"
39 #endif
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_PPC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 42
70 #elif USE_KQEMU
71 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
72 #define TARGET_PHYS_ADDR_SPACE_BITS 32
73 #elif TARGET_X86_64
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #else
76 #define TARGET_PHYS_ADDR_SPACE_BITS 32
77 #endif
79 #ifdef USE_KVM
80 extern int kvm_allowed;
81 #endif
83 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
90 uint8_t *code_gen_ptr;
92 ram_addr_t phys_ram_size;
93 int phys_ram_fd;
94 uint8_t *phys_ram_base;
95 uint8_t *phys_ram_dirty;
96 uint8_t *bios_mem;
97 static int in_migration;
99 CPUState *first_cpu;
100 /* current CPU in the current thread. It is only valid inside
101 cpu_exec() */
102 CPUState *cpu_single_env;
104 typedef struct PageDesc {
105 /* list of TBs intersecting this ram page */
106 TranslationBlock *first_tb;
107 /* in order to optimize self modifying code, we count the number of
108 write invalidations to a given page and build a bitmap past a threshold */
109 unsigned int code_write_count;
110 uint8_t *code_bitmap;
111 #if defined(CONFIG_USER_ONLY)
112 unsigned long flags;
113 #endif
114 } PageDesc;
116 typedef struct PhysPageDesc {
117 /* offset in host memory of the page + io_index in the low 12 bits */
118 ram_addr_t phys_offset;
119 } PhysPageDesc;
121 #define L2_BITS 10
122 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
124 #define L1_SIZE (1 << L1_BITS)
125 #define L2_SIZE (1 << L2_BITS)
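/* Illustrative sketch (not part of the original file; example_* name is
 * hypothetical): how a target address is split across the two map levels
 * used by page_find()/page_find_alloc() below.  The page index
 * (address >> TARGET_PAGE_BITS) selects an L1 slot of the l1_map array
 * declared below with its upper bits and a PageDesc inside the L2 array
 * with its low L2_BITS bits. */
#if 0
static PageDesc *example_two_level_lookup(target_ulong addr)
{
    unsigned int index = addr >> TARGET_PAGE_BITS;
    PageDesc *l2 = l1_map[index >> L2_BITS];      /* first level */
    if (!l2)
        return NULL;                              /* nothing mapped here yet */
    return l2 + (index & (L2_SIZE - 1));          /* second level */
}
#endif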
127 static void io_mem_init(void);
129 unsigned long qemu_real_host_page_size;
130 unsigned long qemu_host_page_bits;
131 unsigned long qemu_host_page_size;
132 unsigned long qemu_host_page_mask;
134 /* XXX: for system emulation, it could just be an array */
135 static PageDesc *l1_map[L1_SIZE];
136 PhysPageDesc **l1_phys_map;
138 /* io memory support */
139 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
140 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
141 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
142 static int io_mem_nb;
144 /* log support */
145 char *logfilename = "/tmp/qemu.log";
146 FILE *logfile;
147 int loglevel;
149 /* statistics */
150 static int tlb_flush_count;
151 static int tb_flush_count;
152 static int tb_phys_invalidate_count;
154 static void page_init(void)
156 /* NOTE: we can always suppose that qemu_host_page_size >=
157 TARGET_PAGE_SIZE */
158 #ifdef _WIN32
160 SYSTEM_INFO system_info;
161 DWORD old_protect;
163 GetSystemInfo(&system_info);
164 qemu_real_host_page_size = system_info.dwPageSize;
166 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
167 PAGE_EXECUTE_READWRITE, &old_protect);
169 #else
170 qemu_real_host_page_size = getpagesize();
172 unsigned long start, end;
174 start = (unsigned long)code_gen_buffer;
175 start &= ~(qemu_real_host_page_size - 1);
177 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
178 end += qemu_real_host_page_size - 1;
179 end &= ~(qemu_real_host_page_size - 1);
181 mprotect((void *)start, end - start,
182 PROT_READ | PROT_WRITE | PROT_EXEC);
184 #endif
186 if (qemu_host_page_size == 0)
187 qemu_host_page_size = qemu_real_host_page_size;
188 if (qemu_host_page_size < TARGET_PAGE_SIZE)
189 qemu_host_page_size = TARGET_PAGE_SIZE;
190 qemu_host_page_bits = 0;
191 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
192 qemu_host_page_bits++;
193 qemu_host_page_mask = ~(qemu_host_page_size - 1);
194 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
195 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
198 static inline PageDesc *page_find_alloc(unsigned int index)
200 PageDesc **lp, *p;
202 lp = &l1_map[index >> L2_BITS];
203 p = *lp;
204 if (!p) {
205 /* allocate if not found */
206 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
207 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
208 *lp = p;
210 return p + (index & (L2_SIZE - 1));
213 static inline PageDesc *page_find(unsigned int index)
215 PageDesc *p;
217 p = l1_map[index >> L2_BITS];
218 if (!p)
219 return 0;
220 return p + (index & (L2_SIZE - 1));
223 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
225 void **lp, **p;
226 PhysPageDesc *pd;
228 p = (void **)l1_phys_map;
229 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
231 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
232 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
233 #endif
234 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
235 p = *lp;
236 if (!p) {
237 /* allocate if not found */
238 if (!alloc)
239 return NULL;
240 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
241 memset(p, 0, sizeof(void *) * L1_SIZE);
242 *lp = p;
244 #endif
245 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
246 pd = *lp;
247 if (!pd) {
248 int i;
249 /* allocate if not found */
250 if (!alloc)
251 return NULL;
252 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
253 *lp = pd;
254 for (i = 0; i < L2_SIZE; i++)
255 pd[i].phys_offset = IO_MEM_UNASSIGNED;
257 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
260 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
262 return phys_page_find_alloc(index, 0);
265 #if !defined(CONFIG_USER_ONLY)
266 static void tlb_protect_code(ram_addr_t ram_addr);
267 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
268 target_ulong vaddr);
269 #endif
271 void cpu_exec_init(CPUState *env)
273 CPUState **penv;
274 int cpu_index;
276 if (!code_gen_ptr) {
277 code_gen_ptr = code_gen_buffer;
278 page_init();
279 io_mem_init();
281 env->next_cpu = NULL;
282 penv = &first_cpu;
283 cpu_index = 0;
284 while (*penv != NULL) {
285 penv = (CPUState **)&(*penv)->next_cpu;
286 cpu_index++;
288 env->cpu_index = cpu_index;
289 *penv = env;
292 static inline void invalidate_page_bitmap(PageDesc *p)
294 if (p->code_bitmap) {
295 qemu_free(p->code_bitmap);
296 p->code_bitmap = NULL;
298 p->code_write_count = 0;
301 /* set to NULL all the 'first_tb' fields in all PageDescs */
302 static void page_flush_tb(void)
304 int i, j;
305 PageDesc *p;
307 for(i = 0; i < L1_SIZE; i++) {
308 p = l1_map[i];
309 if (p) {
310 for(j = 0; j < L2_SIZE; j++) {
311 p->first_tb = NULL;
312 invalidate_page_bitmap(p);
313 p++;
319 /* flush all the translation blocks */
320 /* XXX: tb_flush is currently not thread safe */
321 void tb_flush(CPUState *env1)
323 CPUState *env;
324 #if defined(DEBUG_FLUSH)
325 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
326 code_gen_ptr - code_gen_buffer,
327 nb_tbs,
328 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
329 #endif
330 nb_tbs = 0;
332 for(env = first_cpu; env != NULL; env = env->next_cpu) {
333 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
336 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
337 page_flush_tb();
339 code_gen_ptr = code_gen_buffer;
340 /* XXX: flush processor icache at this point if cache flush is
341 expensive */
342 tb_flush_count++;
345 #ifdef DEBUG_TB_CHECK
347 static void tb_invalidate_check(unsigned long address)
349 TranslationBlock *tb;
350 int i;
351 address &= TARGET_PAGE_MASK;
352 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
353 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
354 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
355 address >= tb->pc + tb->size)) {
356 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
357 address, (long)tb->pc, tb->size);
363 /* verify that all the pages have correct rights for code */
364 static void tb_page_check(void)
366 TranslationBlock *tb;
367 int i, flags1, flags2;
369 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
370 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
371 flags1 = page_get_flags(tb->pc);
372 flags2 = page_get_flags(tb->pc + tb->size - 1);
373 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
374 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
375 (long)tb->pc, tb->size, flags1, flags2);
381 void tb_jmp_check(TranslationBlock *tb)
383 TranslationBlock *tb1;
384 unsigned int n1;
386 /* suppress any remaining jumps to this TB */
387 tb1 = tb->jmp_first;
388 for(;;) {
389 n1 = (long)tb1 & 3;
390 tb1 = (TranslationBlock *)((long)tb1 & ~3);
391 if (n1 == 2)
392 break;
393 tb1 = tb1->jmp_next[n1];
395 /* check end of list */
396 if (tb1 != tb) {
397 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
401 #endif
403 /* invalidate one TB */
404 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
405 int next_offset)
407 TranslationBlock *tb1;
408 for(;;) {
409 tb1 = *ptb;
410 if (tb1 == tb) {
411 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
412 break;
414 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
418 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
420 TranslationBlock *tb1;
421 unsigned int n1;
423 for(;;) {
424 tb1 = *ptb;
425 n1 = (long)tb1 & 3;
426 tb1 = (TranslationBlock *)((long)tb1 & ~3);
427 if (tb1 == tb) {
428 *ptb = tb1->page_next[n1];
429 break;
431 ptb = &tb1->page_next[n1];
435 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
437 TranslationBlock *tb1, **ptb;
438 unsigned int n1;
440 ptb = &tb->jmp_next[n];
441 tb1 = *ptb;
442 if (tb1) {
443 /* find tb(n) in circular list */
444 for(;;) {
445 tb1 = *ptb;
446 n1 = (long)tb1 & 3;
447 tb1 = (TranslationBlock *)((long)tb1 & ~3);
448 if (n1 == n && tb1 == tb)
449 break;
450 if (n1 == 2) {
451 ptb = &tb1->jmp_first;
452 } else {
453 ptb = &tb1->jmp_next[n1];
456 /* now we can suppress tb(n) from the list */
457 *ptb = tb->jmp_next[n];
459 tb->jmp_next[n] = NULL;
463 /* reset the jump entry 'n' of a TB so that it is not chained to
464 another TB */
465 static inline void tb_reset_jump(TranslationBlock *tb, int n)
467 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
470 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
472 CPUState *env;
473 PageDesc *p;
474 unsigned int h, n1;
475 target_ulong phys_pc;
476 TranslationBlock *tb1, *tb2;
478 /* remove the TB from the hash list */
479 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
480 h = tb_phys_hash_func(phys_pc);
481 tb_remove(&tb_phys_hash[h], tb,
482 offsetof(TranslationBlock, phys_hash_next));
484 /* remove the TB from the page list */
485 if (tb->page_addr[0] != page_addr) {
486 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
487 tb_page_remove(&p->first_tb, tb);
488 invalidate_page_bitmap(p);
490 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
491 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
492 tb_page_remove(&p->first_tb, tb);
493 invalidate_page_bitmap(p);
496 tb_invalidated_flag = 1;
498 /* remove the TB from the hash list */
499 h = tb_jmp_cache_hash_func(tb->pc);
500 for(env = first_cpu; env != NULL; env = env->next_cpu) {
501 if (env->tb_jmp_cache[h] == tb)
502 env->tb_jmp_cache[h] = NULL;
505 /* suppress this TB from the two jump lists */
506 tb_jmp_remove(tb, 0);
507 tb_jmp_remove(tb, 1);
509 /* suppress any remaining jumps to this TB */
510 tb1 = tb->jmp_first;
511 for(;;) {
512 n1 = (long)tb1 & 3;
513 if (n1 == 2)
514 break;
515 tb1 = (TranslationBlock *)((long)tb1 & ~3);
516 tb2 = tb1->jmp_next[n1];
517 tb_reset_jump(tb1, n1);
518 tb1->jmp_next[n1] = NULL;
519 tb1 = tb2;
521 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
523 tb_phys_invalidate_count++;
526 static inline void set_bits(uint8_t *tab, int start, int len)
528 int end, mask, end1;
530 end = start + len;
531 tab += start >> 3;
532 mask = 0xff << (start & 7);
533 if ((start & ~7) == (end & ~7)) {
534 if (start < end) {
535 mask &= ~(0xff << (end & 7));
536 *tab |= mask;
538 } else {
539 *tab++ |= mask;
540 start = (start + 8) & ~7;
541 end1 = end & ~7;
542 while (start < end1) {
543 *tab++ = 0xff;
544 start += 8;
546 if (start < end) {
547 mask = ~(0xff << (end & 7));
548 *tab |= mask;
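/* Worked example (illustration only; example_set_bits is hypothetical):
 * set_bits(tab, 3, 7) marks bits 3..9.  Bits 3..7 land in tab[0]
 * (mask 0xff << 3 = 0xf8) and bits 8..9 in tab[1] (mask ~(0xff << 2) = 0x03). */
#if 0
static void example_set_bits(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 3, 7);
    /* now bitmap[0] == 0xf8 and bitmap[1] == 0x03 */
}
#endif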
553 static void build_page_bitmap(PageDesc *p)
555 int n, tb_start, tb_end;
556 TranslationBlock *tb;
558 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
559 if (!p->code_bitmap)
560 return;
561 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
563 tb = p->first_tb;
564 while (tb != NULL) {
565 n = (long)tb & 3;
566 tb = (TranslationBlock *)((long)tb & ~3);
567 /* NOTE: this is subtle as a TB may span two physical pages */
568 if (n == 0) {
569 /* NOTE: tb_end may be after the end of the page, but
570 it is not a problem */
571 tb_start = tb->pc & ~TARGET_PAGE_MASK;
572 tb_end = tb_start + tb->size;
573 if (tb_end > TARGET_PAGE_SIZE)
574 tb_end = TARGET_PAGE_SIZE;
575 } else {
576 tb_start = 0;
577 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
579 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
580 tb = tb->page_next[n];
584 #ifdef TARGET_HAS_PRECISE_SMC
586 static void tb_gen_code(CPUState *env,
587 target_ulong pc, target_ulong cs_base, int flags,
588 int cflags)
590 TranslationBlock *tb;
591 uint8_t *tc_ptr;
592 target_ulong phys_pc, phys_page2, virt_page2;
593 int code_gen_size;
595 phys_pc = get_phys_addr_code(env, pc);
596 tb = tb_alloc(pc);
597 if (!tb) {
598 /* flush must be done */
599 tb_flush(env);
600 /* cannot fail at this point */
601 tb = tb_alloc(pc);
603 tc_ptr = code_gen_ptr;
604 tb->tc_ptr = tc_ptr;
605 tb->cs_base = cs_base;
606 tb->flags = flags;
607 tb->cflags = cflags;
608 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
609 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
611 /* check next page if needed */
612 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
613 phys_page2 = -1;
614 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
615 phys_page2 = get_phys_addr_code(env, virt_page2);
617 tb_link_phys(tb, phys_pc, phys_page2);
619 #endif
621 /* invalidate all TBs which intersect with the target physical page
622 starting in range [start, end[. NOTE: start and end must refer to
623 the same physical page. 'is_cpu_write_access' should be true if called
624 from a real cpu write access: the virtual CPU will exit the current
625 TB if code is modified inside this TB. */
626 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
627 int is_cpu_write_access)
629 int n, current_tb_modified, current_tb_not_found, current_flags;
630 CPUState *env = cpu_single_env;
631 PageDesc *p;
632 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
633 target_ulong tb_start, tb_end;
634 target_ulong current_pc, current_cs_base;
636 p = page_find(start >> TARGET_PAGE_BITS);
637 if (!p)
638 return;
639 if (!p->code_bitmap &&
640 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
641 is_cpu_write_access) {
642 /* build code bitmap */
643 build_page_bitmap(p);
646 /* we remove all the TBs in the range [start, end[ */
647 /* XXX: see if in some cases it could be faster to invalidate all the code */
648 current_tb_not_found = is_cpu_write_access;
649 current_tb_modified = 0;
650 current_tb = NULL; /* avoid warning */
651 current_pc = 0; /* avoid warning */
652 current_cs_base = 0; /* avoid warning */
653 current_flags = 0; /* avoid warning */
654 tb = p->first_tb;
655 while (tb != NULL) {
656 n = (long)tb & 3;
657 tb = (TranslationBlock *)((long)tb & ~3);
658 tb_next = tb->page_next[n];
659 /* NOTE: this is subtle as a TB may span two physical pages */
660 if (n == 0) {
661 /* NOTE: tb_end may be after the end of the page, but
662 it is not a problem */
663 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
664 tb_end = tb_start + tb->size;
665 } else {
666 tb_start = tb->page_addr[1];
667 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
669 if (!(tb_end <= start || tb_start >= end)) {
670 #ifdef TARGET_HAS_PRECISE_SMC
671 if (current_tb_not_found) {
672 current_tb_not_found = 0;
673 current_tb = NULL;
674 if (env->mem_write_pc) {
675 /* now we have a real cpu fault */
676 current_tb = tb_find_pc(env->mem_write_pc);
679 if (current_tb == tb &&
680 !(current_tb->cflags & CF_SINGLE_INSN)) {
681 /* If we are modifying the current TB, we must stop
682 its execution. We could be more precise by checking
683 that the modification is after the current PC, but it
684 would require a specialized function to partially
685 restore the CPU state */
687 current_tb_modified = 1;
688 cpu_restore_state(current_tb, env,
689 env->mem_write_pc, NULL);
690 #if defined(TARGET_I386)
691 current_flags = env->hflags;
692 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
693 current_cs_base = (target_ulong)env->segs[R_CS].base;
694 current_pc = current_cs_base + env->eip;
695 #else
696 #error unsupported CPU
697 #endif
699 #endif /* TARGET_HAS_PRECISE_SMC */
700 /* we need to do that to handle the case where a signal
701 occurs while doing tb_phys_invalidate() */
702 saved_tb = NULL;
703 if (env) {
704 saved_tb = env->current_tb;
705 env->current_tb = NULL;
707 tb_phys_invalidate(tb, -1);
708 if (env) {
709 env->current_tb = saved_tb;
710 if (env->interrupt_request && env->current_tb)
711 cpu_interrupt(env, env->interrupt_request);
714 tb = tb_next;
716 #if !defined(CONFIG_USER_ONLY)
717 /* if no code remaining, no need to continue to use slow writes */
718 if (!p->first_tb) {
719 invalidate_page_bitmap(p);
720 if (is_cpu_write_access) {
721 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
724 #endif
725 #ifdef TARGET_HAS_PRECISE_SMC
726 if (current_tb_modified) {
727 /* we generate a block containing just the instruction
728 modifying the memory. It will ensure that it cannot modify
729 itself */
730 env->current_tb = NULL;
731 tb_gen_code(env, current_pc, current_cs_base, current_flags,
732 CF_SINGLE_INSN);
733 cpu_resume_from_signal(env, NULL);
735 #endif
738 /* len must be <= 8 and start must be a multiple of len */
739 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
741 PageDesc *p;
742 int offset, b;
743 #if 0
744 if (1) {
745 if (loglevel) {
746 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
747 cpu_single_env->mem_write_vaddr, len,
748 cpu_single_env->eip,
749 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
752 #endif
753 p = page_find(start >> TARGET_PAGE_BITS);
754 if (!p)
755 return;
756 if (p->code_bitmap) {
757 offset = start & ~TARGET_PAGE_MASK;
758 b = p->code_bitmap[offset >> 3] >> (offset & 7);
759 if (b & ((1 << len) - 1))
760 goto do_invalidate;
761 } else {
762 do_invalidate:
763 tb_invalidate_phys_page_range(start, start + len, 1);
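/* Worked example of the bitmap test above (illustration only;
 * example_write_hits_code is hypothetical): a 4-byte write at page offset
 * 0x124 reads code_bitmap[0x24], shifts it right by 4 and masks with 0xf,
 * i.e. exactly the bits covering offsets 0x124..0x127; only if one of them
 * is set does the slow range invalidation run. */
#if 0
static int example_write_hits_code(const PageDesc *p, target_ulong start, int len)
{
    int offset = start & ~TARGET_PAGE_MASK;               /* offset in the page */
    int b = p->code_bitmap[offset >> 3] >> (offset & 7);  /* align bit 0 on 'start' */
    return (b & ((1 << len) - 1)) != 0;                   /* any code byte touched? */
}
#endif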
767 #if !defined(CONFIG_SOFTMMU)
768 static void tb_invalidate_phys_page(target_ulong addr,
769 unsigned long pc, void *puc)
771 int n, current_flags, current_tb_modified;
772 target_ulong current_pc, current_cs_base;
773 PageDesc *p;
774 TranslationBlock *tb, *current_tb;
775 #ifdef TARGET_HAS_PRECISE_SMC
776 CPUState *env = cpu_single_env;
777 #endif
779 addr &= TARGET_PAGE_MASK;
780 p = page_find(addr >> TARGET_PAGE_BITS);
781 if (!p)
782 return;
783 tb = p->first_tb;
784 current_tb_modified = 0;
785 current_tb = NULL;
786 current_pc = 0; /* avoid warning */
787 current_cs_base = 0; /* avoid warning */
788 current_flags = 0; /* avoid warning */
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (tb && pc != 0) {
791 current_tb = tb_find_pc(pc);
793 #endif
794 while (tb != NULL) {
795 n = (long)tb & 3;
796 tb = (TranslationBlock *)((long)tb & ~3);
797 #ifdef TARGET_HAS_PRECISE_SMC
798 if (current_tb == tb &&
799 !(current_tb->cflags & CF_SINGLE_INSN)) {
800 /* If we are modifying the current TB, we must stop
801 its execution. We could be more precise by checking
802 that the modification is after the current PC, but it
803 would require a specialized function to partially
804 restore the CPU state */
806 current_tb_modified = 1;
807 cpu_restore_state(current_tb, env, pc, puc);
808 #if defined(TARGET_I386)
809 current_flags = env->hflags;
810 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
811 current_cs_base = (target_ulong)env->segs[R_CS].base;
812 current_pc = current_cs_base + env->eip;
813 #else
814 #error unsupported CPU
815 #endif
817 #endif /* TARGET_HAS_PRECISE_SMC */
818 tb_phys_invalidate(tb, addr);
819 tb = tb->page_next[n];
821 p->first_tb = NULL;
822 #ifdef TARGET_HAS_PRECISE_SMC
823 if (current_tb_modified) {
824 /* we generate a block containing just the instruction
825 modifying the memory. It will ensure that it cannot modify
826 itself */
827 env->current_tb = NULL;
828 tb_gen_code(env, current_pc, current_cs_base, current_flags,
829 CF_SINGLE_INSN);
830 cpu_resume_from_signal(env, puc);
832 #endif
834 #endif
836 /* add the tb in the target page and protect it if necessary */
837 static inline void tb_alloc_page(TranslationBlock *tb,
838 unsigned int n, target_ulong page_addr)
840 PageDesc *p;
841 TranslationBlock *last_first_tb;
843 tb->page_addr[n] = page_addr;
844 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
845 tb->page_next[n] = p->first_tb;
846 last_first_tb = p->first_tb;
847 p->first_tb = (TranslationBlock *)((long)tb | n);
848 invalidate_page_bitmap(p);
850 #if defined(TARGET_HAS_SMC) || 1
852 #if defined(CONFIG_USER_ONLY)
853 if (p->flags & PAGE_WRITE) {
854 target_ulong addr;
855 PageDesc *p2;
856 int prot;
858 /* force the host page as non writable (writes will have a
859 page fault + mprotect overhead) */
860 page_addr &= qemu_host_page_mask;
861 prot = 0;
862 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
863 addr += TARGET_PAGE_SIZE) {
865 p2 = page_find (addr >> TARGET_PAGE_BITS);
866 if (!p2)
867 continue;
868 prot |= p2->flags;
869 p2->flags &= ~PAGE_WRITE;
870 page_get_flags(addr);
872 mprotect(g2h(page_addr), qemu_host_page_size,
873 (prot & PAGE_BITS) & ~PAGE_WRITE);
874 #ifdef DEBUG_TB_INVALIDATE
875 printf("protecting code page: 0x%08lx\n",
876 page_addr);
877 #endif
879 #else
880 /* if some code is already present, then the pages are already
881 protected. So we handle the case where only the first TB is
882 allocated in a physical page */
883 if (!last_first_tb) {
884 tlb_protect_code(page_addr);
886 #endif
888 #endif /* TARGET_HAS_SMC */
891 /* Allocate a new translation block. Flush the translation buffer if
892 too many translation blocks or too much generated code. */
893 TranslationBlock *tb_alloc(target_ulong pc)
895 TranslationBlock *tb;
897 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
898 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
899 return NULL;
900 tb = &tbs[nb_tbs++];
901 tb->pc = pc;
902 tb->cflags = 0;
903 return tb;
906 /* add a new TB and link it to the physical page tables. phys_page2 is
907 (-1) to indicate that only one page contains the TB. */
908 void tb_link_phys(TranslationBlock *tb,
909 target_ulong phys_pc, target_ulong phys_page2)
911 unsigned int h;
912 TranslationBlock **ptb;
914 /* add in the physical hash table */
915 h = tb_phys_hash_func(phys_pc);
916 ptb = &tb_phys_hash[h];
917 tb->phys_hash_next = *ptb;
918 *ptb = tb;
920 /* add in the page list */
921 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
922 if (phys_page2 != -1)
923 tb_alloc_page(tb, 1, phys_page2);
924 else
925 tb->page_addr[1] = -1;
927 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
928 tb->jmp_next[0] = NULL;
929 tb->jmp_next[1] = NULL;
930 #ifdef USE_CODE_COPY
931 tb->cflags &= ~CF_FP_USED;
932 if (tb->cflags & CF_TB_FP_USED)
933 tb->cflags |= CF_FP_USED;
934 #endif
936 /* init original jump addresses */
937 if (tb->tb_next_offset[0] != 0xffff)
938 tb_reset_jump(tb, 0);
939 if (tb->tb_next_offset[1] != 0xffff)
940 tb_reset_jump(tb, 1);
942 #ifdef DEBUG_TB_CHECK
943 tb_page_check();
944 #endif
947 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
948 tb[1].tc_ptr. Return NULL if not found */
949 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
951 int m_min, m_max, m;
952 unsigned long v;
953 TranslationBlock *tb;
955 if (nb_tbs <= 0)
956 return NULL;
957 if (tc_ptr < (unsigned long)code_gen_buffer ||
958 tc_ptr >= (unsigned long)code_gen_ptr)
959 return NULL;
960 /* binary search (cf Knuth) */
961 m_min = 0;
962 m_max = nb_tbs - 1;
963 while (m_min <= m_max) {
964 m = (m_min + m_max) >> 1;
965 tb = &tbs[m];
966 v = (unsigned long)tb->tc_ptr;
967 if (v == tc_ptr)
968 return tb;
969 else if (tc_ptr < v) {
970 m_max = m - 1;
971 } else {
972 m_min = m + 1;
975 return &tbs[m_max];
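/* Usage sketch (illustration only; the fault-handling caller shown here is
 * hypothetical): given the host PC at which a fault or signal occurred,
 * find the translated block containing it and roll the guest CPU state
 * back to the corresponding guest instruction, as the SMC handling above
 * does with env->mem_write_pc. */
#if 0
static void example_recover_guest_state(CPUState *env, unsigned long host_pc,
                                        void *puc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* restores the guest PC/flags for the instruction being executed */
        cpu_restore_state(tb, env, host_pc, puc);
    }
}
#endif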
978 static void tb_reset_jump_recursive(TranslationBlock *tb);
980 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
982 TranslationBlock *tb1, *tb_next, **ptb;
983 unsigned int n1;
985 tb1 = tb->jmp_next[n];
986 if (tb1 != NULL) {
987 /* find head of list */
988 for(;;) {
989 n1 = (long)tb1 & 3;
990 tb1 = (TranslationBlock *)((long)tb1 & ~3);
991 if (n1 == 2)
992 break;
993 tb1 = tb1->jmp_next[n1];
995 /* we are now sure that tb jumps to tb1 */
996 tb_next = tb1;
998 /* remove tb from the jmp_first list */
999 ptb = &tb_next->jmp_first;
1000 for(;;) {
1001 tb1 = *ptb;
1002 n1 = (long)tb1 & 3;
1003 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1004 if (n1 == n && tb1 == tb)
1005 break;
1006 ptb = &tb1->jmp_next[n1];
1008 *ptb = tb->jmp_next[n];
1009 tb->jmp_next[n] = NULL;
1011 /* suppress the jump to next tb in generated code */
1012 tb_reset_jump(tb, n);
1014 /* suppress jumps in the tb on which we could have jumped */
1015 tb_reset_jump_recursive(tb_next);
1019 static void tb_reset_jump_recursive(TranslationBlock *tb)
1021 tb_reset_jump_recursive2(tb, 0);
1022 tb_reset_jump_recursive2(tb, 1);
1025 #if defined(TARGET_HAS_ICE)
1026 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1028 target_ulong addr, pd;
1029 ram_addr_t ram_addr;
1030 PhysPageDesc *p;
1032 addr = cpu_get_phys_page_debug(env, pc);
1033 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1034 if (!p) {
1035 pd = IO_MEM_UNASSIGNED;
1036 } else {
1037 pd = p->phys_offset;
1039 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1040 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1042 #endif
1044 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1045 breakpoint is reached */
1046 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1048 #if defined(TARGET_HAS_ICE)
1049 int i;
1051 for(i = 0; i < env->nb_breakpoints; i++) {
1052 if (env->breakpoints[i] == pc)
1053 return 0;
1056 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1057 return -1;
1058 env->breakpoints[env->nb_breakpoints++] = pc;
1060 #ifdef USE_KVM
1061 if (kvm_allowed)
1062 kvm_update_debugger(env);
1063 #endif
1065 breakpoint_invalidate(env, pc);
1066 return 0;
1067 #else
1068 return -1;
1069 #endif
1072 /* remove a breakpoint */
1073 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1075 #if defined(TARGET_HAS_ICE)
1076 int i;
1077 for(i = 0; i < env->nb_breakpoints; i++) {
1078 if (env->breakpoints[i] == pc)
1079 goto found;
1081 return -1;
1082 found:
1083 env->nb_breakpoints--;
1084 if (i < env->nb_breakpoints)
1085 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1087 #ifdef USE_KVM
1088 if (kvm_allowed)
1089 kvm_update_debugger(env);
1090 #endif
1092 breakpoint_invalidate(env, pc);
1093 return 0;
1094 #else
1095 return -1;
1096 #endif
1099 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1100 CPU loop after each instruction */
1101 void cpu_single_step(CPUState *env, int enabled)
1103 #if defined(TARGET_HAS_ICE)
1104 if (env->singlestep_enabled != enabled) {
1105 env->singlestep_enabled = enabled;
1106 /* must flush all the translated code to avoid inconsistencies */
1107 /* XXX: only flush what is necessary */
1108 tb_flush(env);
1110 #ifdef USE_KVM
1111 if (kvm_allowed)
1112 kvm_update_debugger(env);
1113 #endif
1114 #endif
1117 /* enable or disable low-level logging */
1118 void cpu_set_log(int log_flags)
1120 loglevel = log_flags;
1121 if (loglevel && !logfile) {
1122 logfile = fopen(logfilename, "w");
1123 if (!logfile) {
1124 perror(logfilename);
1125 _exit(1);
1127 #if !defined(CONFIG_SOFTMMU)
1128 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1130 static uint8_t logfile_buf[4096];
1131 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1133 #else
1134 setvbuf(logfile, NULL, _IOLBF, 0);
1135 #endif
1139 void cpu_set_log_filename(const char *filename)
1141 logfilename = strdup(filename);
1144 /* mask must never be zero, except for A20 change call */
1145 void cpu_interrupt(CPUState *env, int mask)
1147 TranslationBlock *tb;
1148 static int interrupt_lock;
1150 env->interrupt_request |= mask;
1151 #ifdef USE_KVM
1152 if (kvm_allowed)
1153 kvm_update_interrupt_request(env);
1154 #endif
1155 /* if the cpu is currently executing code, we must unlink it and
1156 all the potentially executing TBs */
1157 tb = env->current_tb;
1158 if (tb && !testandset(&interrupt_lock)) {
1159 env->current_tb = NULL;
1160 tb_reset_jump_recursive(tb);
1161 interrupt_lock = 0;
1165 void cpu_reset_interrupt(CPUState *env, int mask)
1167 env->interrupt_request &= ~mask;
1170 CPULogItem cpu_log_items[] = {
1171 { CPU_LOG_TB_OUT_ASM, "out_asm",
1172 "show generated host assembly code for each compiled TB" },
1173 { CPU_LOG_TB_IN_ASM, "in_asm",
1174 "show target assembly code for each compiled TB" },
1175 { CPU_LOG_TB_OP, "op",
1176 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1177 #ifdef TARGET_I386
1178 { CPU_LOG_TB_OP_OPT, "op_opt",
1179 "show micro ops after optimization for each compiled TB" },
1180 #endif
1181 { CPU_LOG_INT, "int",
1182 "show interrupts/exceptions in short format" },
1183 { CPU_LOG_EXEC, "exec",
1184 "show trace before each executed TB (lots of logs)" },
1185 { CPU_LOG_TB_CPU, "cpu",
1186 "show CPU state before bloc translation" },
1187 #ifdef TARGET_I386
1188 { CPU_LOG_PCALL, "pcall",
1189 "show protected mode far calls/returns/exceptions" },
1190 #endif
1191 #ifdef DEBUG_IOPORT
1192 { CPU_LOG_IOPORT, "ioport",
1193 "show all i/o ports accesses" },
1194 #endif
1195 { 0, NULL, NULL },
1198 static int cmp1(const char *s1, int n, const char *s2)
1200 if (strlen(s2) != n)
1201 return 0;
1202 return memcmp(s1, s2, n) == 0;
1205 /* takes a comma separated list of log masks. Returns 0 on error. */
1206 int cpu_str_to_log_mask(const char *str)
1208 CPULogItem *item;
1209 int mask;
1210 const char *p, *p1;
1212 p = str;
1213 mask = 0;
1214 for(;;) {
1215 p1 = strchr(p, ',');
1216 if (!p1)
1217 p1 = p + strlen(p);
1218 if(cmp1(p,p1-p,"all")) {
1219 for(item = cpu_log_items; item->mask != 0; item++) {
1220 mask |= item->mask;
1222 } else {
1223 for(item = cpu_log_items; item->mask != 0; item++) {
1224 if (cmp1(p, p1 - p, item->name))
1225 goto found;
1227 return 0;
1229 found:
1230 mask |= item->mask;
1231 if (*p1 != ',')
1232 break;
1233 p = p1 + 1;
1235 return mask;
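/* Usage sketch (illustration only; example_enable_logging is hypothetical,
 * in practice this is driven by the -d command line option): turn a user
 * supplied string into a mask and enable logging with it. */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,exec" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);                     /* opens the log file on first use */
}
#endif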
1238 void cpu_abort(CPUState *env, const char *fmt, ...)
1240 va_list ap;
1242 va_start(ap, fmt);
1243 fprintf(stderr, "qemu: fatal: ");
1244 vfprintf(stderr, fmt, ap);
1245 fprintf(stderr, "\n");
1246 #ifdef TARGET_I386
1247 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1248 #else
1249 cpu_dump_state(env, stderr, fprintf, 0);
1250 #endif
1251 va_end(ap);
1252 abort();
1255 #if !defined(CONFIG_USER_ONLY)
1257 /* NOTE: if flush_global is true, also flush global entries (not
1258 implemented yet) */
1259 void tlb_flush(CPUState *env, int flush_global)
1261 int i;
1263 #if defined(DEBUG_TLB)
1264 printf("tlb_flush:\n");
1265 #endif
1266 /* must reset current TB so that interrupts cannot modify the
1267 links while we are modifying them */
1268 env->current_tb = NULL;
1270 for(i = 0; i < CPU_TLB_SIZE; i++) {
1271 env->tlb_table[0][i].addr_read = -1;
1272 env->tlb_table[0][i].addr_write = -1;
1273 env->tlb_table[0][i].addr_code = -1;
1274 env->tlb_table[1][i].addr_read = -1;
1275 env->tlb_table[1][i].addr_write = -1;
1276 env->tlb_table[1][i].addr_code = -1;
1279 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1281 #if !defined(CONFIG_SOFTMMU)
1282 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1283 #endif
1284 #ifdef USE_KQEMU
1285 if (env->kqemu_enabled) {
1286 kqemu_flush(env, flush_global);
1288 #endif
1289 tlb_flush_count++;
1292 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1294 if (addr == (tlb_entry->addr_read &
1295 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1296 addr == (tlb_entry->addr_write &
1297 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1298 addr == (tlb_entry->addr_code &
1299 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1300 tlb_entry->addr_read = -1;
1301 tlb_entry->addr_write = -1;
1302 tlb_entry->addr_code = -1;
1306 void tlb_flush_page(CPUState *env, target_ulong addr)
1308 int i;
1309 TranslationBlock *tb;
1311 #if defined(DEBUG_TLB)
1312 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1313 #endif
1314 /* must reset current TB so that interrupts cannot modify the
1315 links while we are modifying them */
1316 env->current_tb = NULL;
1318 addr &= TARGET_PAGE_MASK;
1319 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1320 tlb_flush_entry(&env->tlb_table[0][i], addr);
1321 tlb_flush_entry(&env->tlb_table[1][i], addr);
1323 /* Discard jump cache entries for any tb which might potentially
1324 overlap the flushed page. */
1325 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1326 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1328 i = tb_jmp_cache_hash_page(addr);
1329 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1331 #if !defined(CONFIG_SOFTMMU)
1332 if (addr < MMAP_AREA_END)
1333 munmap((void *)addr, TARGET_PAGE_SIZE);
1334 #endif
1335 #ifdef USE_KQEMU
1336 if (env->kqemu_enabled) {
1337 kqemu_flush_page(env, addr);
1339 #endif
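/* Illustrative sketch (not part of the original file; example_tlb_index is
 * hypothetical): the softmmu TLB is direct-mapped on the low bits of the
 * virtual page number, so a given address can only occupy one slot in each
 * of the two mode tables, which is why tlb_flush_page() above clears
 * exactly one entry per table. */
#if 0
static int example_tlb_index(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}
#endif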
1342 /* update the TLBs so that writes to code in the virtual page 'addr'
1343 can be detected */
1344 static void tlb_protect_code(ram_addr_t ram_addr)
1346 cpu_physical_memory_reset_dirty(ram_addr,
1347 ram_addr + TARGET_PAGE_SIZE,
1348 CODE_DIRTY_FLAG);
1351 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1352 tested for self modifying code */
1353 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1354 target_ulong vaddr)
1356 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1359 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1360 unsigned long start, unsigned long length)
1362 unsigned long addr;
1363 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1364 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1365 if ((addr - start) < length) {
1366 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1371 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1372 int dirty_flags)
1374 CPUState *env;
1375 unsigned long length, start1;
1376 int i, mask, len;
1377 uint8_t *p;
1379 start &= TARGET_PAGE_MASK;
1380 end = TARGET_PAGE_ALIGN(end);
1382 length = end - start;
1383 if (length == 0)
1384 return;
1385 len = length >> TARGET_PAGE_BITS;
1386 #ifdef USE_KQEMU
1387 /* XXX: should not depend on cpu context */
1388 env = first_cpu;
1389 if (env->kqemu_enabled) {
1390 ram_addr_t addr;
1391 addr = start;
1392 for(i = 0; i < len; i++) {
1393 kqemu_set_notdirty(env, addr);
1394 addr += TARGET_PAGE_SIZE;
1397 #endif
1398 mask = ~dirty_flags;
1399 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1400 for(i = 0; i < len; i++)
1401 p[i] &= mask;
1403 /* we modify the TLB cache so that the dirty bit will be set again
1404 when accessing the range */
1405 start1 = start + (unsigned long)phys_ram_base;
1406 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1407 for(i = 0; i < CPU_TLB_SIZE; i++)
1408 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1409 for(i = 0; i < CPU_TLB_SIZE; i++)
1410 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1413 #if !defined(CONFIG_SOFTMMU)
1414 /* XXX: this is expensive */
1416 VirtPageDesc *p;
1417 int j;
1418 target_ulong addr;
1420 for(i = 0; i < L1_SIZE; i++) {
1421 p = l1_virt_map[i];
1422 if (p) {
1423 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1424 for(j = 0; j < L2_SIZE; j++) {
1425 if (p->valid_tag == virt_valid_tag &&
1426 p->phys_addr >= start && p->phys_addr < end &&
1427 (p->prot & PROT_WRITE)) {
1428 if (addr < MMAP_AREA_END) {
1429 mprotect((void *)addr, TARGET_PAGE_SIZE,
1430 p->prot & ~PROT_WRITE);
1433 addr += TARGET_PAGE_SIZE;
1434 p++;
1439 #endif
1442 int cpu_physical_memory_set_dirty_tracking(int enable)
1444 int r=0;
1446 #ifdef USE_KVM
1447 r = kvm_physical_memory_set_dirty_tracking(enable);
1448 #endif
1449 in_migration = enable;
1450 return r;
1453 int cpu_physical_memory_get_dirty_tracking(void)
1455 return in_migration;
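/* Usage sketch (illustration only; the migration code that would call this
 * lives elsewhere, and 'dirty_flag' stands for whatever flag bit the caller
 * owns): with tracking enabled, test a page's dirty state and clear the
 * caller's flag so the next guest write is caught again via the not-dirty
 * TLB entries re-armed by cpu_physical_memory_reset_dirty(). */
#if 0
static int example_check_and_rearm(ram_addr_t addr, int dirty_flag)
{
    if (!cpu_physical_memory_get_dirty_tracking())
        return 0;                                 /* tracking disabled */
    if (!cpu_physical_memory_is_dirty(addr))      /* no write since last reset */
        return 0;
    cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE, dirty_flag);
    return 1;
}
#endif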
1458 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1460 ram_addr_t ram_addr;
1462 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1463 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1464 tlb_entry->addend - (unsigned long)phys_ram_base;
1465 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1466 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1471 /* update the TLB according to the current state of the dirty bits */
1472 void cpu_tlb_update_dirty(CPUState *env)
1474 int i;
1475 for(i = 0; i < CPU_TLB_SIZE; i++)
1476 tlb_update_dirty(&env->tlb_table[0][i]);
1477 for(i = 0; i < CPU_TLB_SIZE; i++)
1478 tlb_update_dirty(&env->tlb_table[1][i]);
1481 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1482 unsigned long start)
1484 unsigned long addr;
1485 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1486 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1487 if (addr == start) {
1488 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1493 /* update the TLB corresponding to virtual page vaddr and phys addr
1494 addr so that it is no longer dirty */
1495 static inline void tlb_set_dirty(CPUState *env,
1496 unsigned long addr, target_ulong vaddr)
1498 int i;
1500 addr &= TARGET_PAGE_MASK;
1501 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1502 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1503 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1506 /* add a new TLB entry. At most one entry for a given virtual address
1507 is permitted. Return 0 if OK or 2 if the page could not be mapped
1508 (can only happen in non SOFTMMU mode for I/O pages or pages
1509 conflicting with the host address space). */
1510 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1511 target_phys_addr_t paddr, int prot,
1512 int is_user, int is_softmmu)
1514 PhysPageDesc *p;
1515 unsigned long pd;
1516 unsigned int index;
1517 target_ulong address;
1518 target_phys_addr_t addend;
1519 int ret;
1520 CPUTLBEntry *te;
1522 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1523 if (!p) {
1524 pd = IO_MEM_UNASSIGNED;
1525 } else {
1526 pd = p->phys_offset;
1528 #if defined(DEBUG_TLB)
1529 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1530 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1531 #endif
1533 ret = 0;
1534 #if !defined(CONFIG_SOFTMMU)
1535 if (is_softmmu)
1536 #endif
1538 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1539 /* IO memory case */
1540 address = vaddr | pd;
1541 addend = paddr;
1542 } else {
1543 /* standard memory */
1544 address = vaddr;
1545 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1548 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1549 addend -= vaddr;
1550 te = &env->tlb_table[is_user][index];
1551 te->addend = addend;
1552 if (prot & PAGE_READ) {
1553 te->addr_read = address;
1554 } else {
1555 te->addr_read = -1;
1557 if (prot & PAGE_EXEC) {
1558 te->addr_code = address;
1559 } else {
1560 te->addr_code = -1;
1562 if (prot & PAGE_WRITE) {
1563 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1564 (pd & IO_MEM_ROMD)) {
1565 /* write access calls the I/O callback */
1566 te->addr_write = vaddr |
1567 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1568 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1569 !cpu_physical_memory_is_dirty(pd)) {
1570 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1571 } else {
1572 te->addr_write = address;
1574 } else {
1575 te->addr_write = -1;
1578 #if !defined(CONFIG_SOFTMMU)
1579 else {
1580 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1581 /* IO access: no mapping is done as it will be handled by the
1582 soft MMU */
1583 if (!(env->hflags & HF_SOFTMMU_MASK))
1584 ret = 2;
1585 } else {
1586 void *map_addr;
1588 if (vaddr >= MMAP_AREA_END) {
1589 ret = 2;
1590 } else {
1591 if (prot & PROT_WRITE) {
1592 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1593 #if defined(TARGET_HAS_SMC) || 1
1594 first_tb ||
1595 #endif
1596 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1597 !cpu_physical_memory_is_dirty(pd))) {
1598 /* ROM: we do as if code was inside */
1599 /* if code is present, we only map as read only and save the
1600 original mapping */
1601 VirtPageDesc *vp;
1603 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1604 vp->phys_addr = pd;
1605 vp->prot = prot;
1606 vp->valid_tag = virt_valid_tag;
1607 prot &= ~PAGE_WRITE;
1610 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1611 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1612 if (map_addr == MAP_FAILED) {
1613 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1614 paddr, vaddr);
1619 #endif
1620 return ret;
1623 /* called from signal handler: invalidate the code and unprotect the
1624 page. Return TRUE if the fault was successfully handled. */
1625 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1627 #if !defined(CONFIG_SOFTMMU)
1628 VirtPageDesc *vp;
1630 #if defined(DEBUG_TLB)
1631 printf("page_unprotect: addr=0x%08x\n", addr);
1632 #endif
1633 addr &= TARGET_PAGE_MASK;
1635 /* if it is not mapped, no need to worry here */
1636 if (addr >= MMAP_AREA_END)
1637 return 0;
1638 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1639 if (!vp)
1640 return 0;
1641 /* NOTE: in this case, valid_tag is _not_ tested as it
1642 validates only the code TLB */
1643 if (vp->valid_tag != virt_valid_tag)
1644 return 0;
1645 if (!(vp->prot & PAGE_WRITE))
1646 return 0;
1647 #if defined(DEBUG_TLB)
1648 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1649 addr, vp->phys_addr, vp->prot);
1650 #endif
1651 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1652 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1653 (unsigned long)addr, vp->prot);
1654 /* set the dirty bit */
1655 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1656 /* flush the code inside */
1657 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1658 return 1;
1659 #else
1660 return 0;
1661 #endif
1664 #else
1666 void tlb_flush(CPUState *env, int flush_global)
1670 void tlb_flush_page(CPUState *env, target_ulong addr)
1674 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1675 target_phys_addr_t paddr, int prot,
1676 int is_user, int is_softmmu)
1678 return 0;
1681 /* dump memory mappings */
1682 void page_dump(FILE *f)
1684 unsigned long start, end;
1685 int i, j, prot, prot1;
1686 PageDesc *p;
1688 fprintf(f, "%-8s %-8s %-8s %s\n",
1689 "start", "end", "size", "prot");
1690 start = -1;
1691 end = -1;
1692 prot = 0;
1693 for(i = 0; i <= L1_SIZE; i++) {
1694 if (i < L1_SIZE)
1695 p = l1_map[i];
1696 else
1697 p = NULL;
1698 for(j = 0;j < L2_SIZE; j++) {
1699 if (!p)
1700 prot1 = 0;
1701 else
1702 prot1 = p[j].flags;
1703 if (prot1 != prot) {
1704 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1705 if (start != -1) {
1706 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1707 start, end, end - start,
1708 prot & PAGE_READ ? 'r' : '-',
1709 prot & PAGE_WRITE ? 'w' : '-',
1710 prot & PAGE_EXEC ? 'x' : '-');
1712 if (prot1 != 0)
1713 start = end;
1714 else
1715 start = -1;
1716 prot = prot1;
1718 if (!p)
1719 break;
1724 int page_get_flags(target_ulong address)
1726 PageDesc *p;
1728 p = page_find(address >> TARGET_PAGE_BITS);
1729 if (!p)
1730 return 0;
1731 return p->flags;
1734 /* modify the flags of a page and invalidate the code if
1735 necessary. The flag PAGE_WRITE_ORG is set automatically
1736 depending on PAGE_WRITE */
1737 void page_set_flags(target_ulong start, target_ulong end, int flags)
1739 PageDesc *p;
1740 target_ulong addr;
1742 start = start & TARGET_PAGE_MASK;
1743 end = TARGET_PAGE_ALIGN(end);
1744 if (flags & PAGE_WRITE)
1745 flags |= PAGE_WRITE_ORG;
1746 spin_lock(&tb_lock);
1747 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1748 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1749 /* if the write protection is set, then we invalidate the code
1750 inside */
1751 if (!(p->flags & PAGE_WRITE) &&
1752 (flags & PAGE_WRITE) &&
1753 p->first_tb) {
1754 tb_invalidate_phys_page(addr, 0, NULL);
1756 p->flags = flags;
1758 spin_unlock(&tb_lock);
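/* Usage sketch (illustration only; in practice the user-mode syscall
 * emulation, e.g. the target mprotect handler, is the caller and the
 * example_* name is hypothetical): record that a guest range is now
 * read-write, letting page_set_flags() above invalidate any translated
 * code that relied on it being read-only. */
#if 0
static void example_guest_mprotect_rw(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif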
1761 /* called from signal handler: invalidate the code and unprotect the
1762 page. Return TRUE if the fault was successfully handled. */
1763 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1765 unsigned int page_index, prot, pindex;
1766 PageDesc *p, *p1;
1767 target_ulong host_start, host_end, addr;
1769 host_start = address & qemu_host_page_mask;
1770 page_index = host_start >> TARGET_PAGE_BITS;
1771 p1 = page_find(page_index);
1772 if (!p1)
1773 return 0;
1774 host_end = host_start + qemu_host_page_size;
1775 p = p1;
1776 prot = 0;
1777 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1778 prot |= p->flags;
1779 p++;
1781 /* if the page was really writable, then we change its
1782 protection back to writable */
1783 if (prot & PAGE_WRITE_ORG) {
1784 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1785 if (!(p1[pindex].flags & PAGE_WRITE)) {
1786 mprotect((void *)g2h(host_start), qemu_host_page_size,
1787 (prot & PAGE_BITS) | PAGE_WRITE);
1788 p1[pindex].flags |= PAGE_WRITE;
1789 /* and since the content will be modified, we must invalidate
1790 the corresponding translated code. */
1791 tb_invalidate_phys_page(address, pc, puc);
1792 #ifdef DEBUG_TB_CHECK
1793 tb_invalidate_check(address);
1794 #endif
1795 return 1;
1798 return 0;
1801 /* call this function when system calls directly modify a memory area */
1802 /* ??? This should be redundant now we have lock_user. */
1803 void page_unprotect_range(target_ulong data, target_ulong data_size)
1805 target_ulong start, end, addr;
1807 start = data;
1808 end = start + data_size;
1809 start &= TARGET_PAGE_MASK;
1810 end = TARGET_PAGE_ALIGN(end);
1811 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1812 page_unprotect(addr, 0, NULL);
1816 static inline void tlb_set_dirty(CPUState *env,
1817 unsigned long addr, target_ulong vaddr)
1820 #endif /* defined(CONFIG_USER_ONLY) */
1822 /* register physical memory. 'size' must be a multiple of the target
1823 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1824 io memory page */
1825 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1826 unsigned long size,
1827 unsigned long phys_offset)
1829 target_phys_addr_t addr, end_addr;
1830 PhysPageDesc *p;
1831 CPUState *env;
1833 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1834 end_addr = start_addr + size;
1835 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1836 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1837 p->phys_offset = phys_offset;
1838 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1839 (phys_offset & IO_MEM_ROMD))
1840 phys_offset += TARGET_PAGE_SIZE;
1843 /* since each CPU stores ram addresses in its TLB cache, we must
1844 reset the modified entries */
1845 /* XXX: slow ! */
1846 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1847 tlb_flush(env, 1);
1851 /* XXX: temporary until new memory mapping API */
1852 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1854 PhysPageDesc *p;
1856 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1857 if (!p)
1858 return IO_MEM_UNASSIGNED;
1859 return p->phys_offset;
1862 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1864 #ifdef DEBUG_UNASSIGNED
1865 printf("Unassigned mem read 0x%08x\n", (int)addr);
1866 #endif
1867 return 0;
1870 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1872 #ifdef DEBUG_UNASSIGNED
1873 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1874 #endif
1877 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1878 unassigned_mem_readb,
1879 unassigned_mem_readb,
1880 unassigned_mem_readb,
1883 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1884 unassigned_mem_writeb,
1885 unassigned_mem_writeb,
1886 unassigned_mem_writeb,
1889 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1891 unsigned long ram_addr;
1892 int dirty_flags;
1893 ram_addr = addr - (unsigned long)phys_ram_base;
1894 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1895 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1896 #if !defined(CONFIG_USER_ONLY)
1897 tb_invalidate_phys_page_fast(ram_addr, 1);
1898 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1899 #endif
1901 stb_p((uint8_t *)(long)addr, val);
1902 #ifdef USE_KQEMU
1903 if (cpu_single_env->kqemu_enabled &&
1904 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1905 kqemu_modify_page(cpu_single_env, ram_addr);
1906 #endif
1907 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1908 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1909 /* we remove the notdirty callback only if the code has been
1910 flushed */
1911 if (dirty_flags == 0xff)
1912 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1915 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1917 unsigned long ram_addr;
1918 int dirty_flags;
1919 ram_addr = addr - (unsigned long)phys_ram_base;
1920 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1921 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1922 #if !defined(CONFIG_USER_ONLY)
1923 tb_invalidate_phys_page_fast(ram_addr, 2);
1924 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1925 #endif
1927 stw_p((uint8_t *)(long)addr, val);
1928 #ifdef USE_KQEMU
1929 if (cpu_single_env->kqemu_enabled &&
1930 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1931 kqemu_modify_page(cpu_single_env, ram_addr);
1932 #endif
1933 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1934 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1935 /* we remove the notdirty callback only if the code has been
1936 flushed */
1937 if (dirty_flags == 0xff)
1938 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1941 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1943 unsigned long ram_addr;
1944 int dirty_flags;
1945 ram_addr = addr - (unsigned long)phys_ram_base;
1946 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1947 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1948 #if !defined(CONFIG_USER_ONLY)
1949 tb_invalidate_phys_page_fast(ram_addr, 4);
1950 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1951 #endif
1953 stl_p((uint8_t *)(long)addr, val);
1954 #ifdef USE_KQEMU
1955 if (cpu_single_env->kqemu_enabled &&
1956 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1957 kqemu_modify_page(cpu_single_env, ram_addr);
1958 #endif
1959 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1960 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1961 /* we remove the notdirty callback only if the code has been
1962 flushed */
1963 if (dirty_flags == 0xff)
1964 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1967 static CPUReadMemoryFunc *error_mem_read[3] = {
1968 NULL, /* never used */
1969 NULL, /* never used */
1970 NULL, /* never used */
1973 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1974 notdirty_mem_writeb,
1975 notdirty_mem_writew,
1976 notdirty_mem_writel,
1979 static void io_mem_init(void)
1981 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1982 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1983 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1984 io_mem_nb = 5;
1986 /* alloc dirty bits array */
1987 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1988 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1991 /* mem_read and mem_write are arrays of functions containing the
1992 function to access byte (index 0), word (index 1) and dword (index
1993 2). All functions must be supplied. If io_index is non zero, the
1994 corresponding io zone is modified. If it is zero, a new io zone is
1995 allocated. The return value can be used with
1996 cpu_register_physical_memory(). (-1) is returned on error. */
1997 int cpu_register_io_memory(int io_index,
1998 CPUReadMemoryFunc **mem_read,
1999 CPUWriteMemoryFunc **mem_write,
2000 void *opaque)
2002 int i;
2004 if (io_index <= 0) {
2005 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2006 return -1;
2007 io_index = io_mem_nb++;
2008 } else {
2009 if (io_index >= IO_MEM_NB_ENTRIES)
2010 return -1;
2013 for(i = 0;i < 3; i++) {
2014 io_mem_read[io_index][i] = mem_read[i];
2015 io_mem_write[io_index][i] = mem_write[i];
2017 io_mem_opaque[io_index] = opaque;
2018 return io_index << IO_MEM_SHIFT;
2021 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2023 return io_mem_write[io_index >> IO_MEM_SHIFT];
2026 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2028 return io_mem_read[io_index >> IO_MEM_SHIFT];
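/* Usage sketch (illustration only; the device, callbacks and addresses are
 * hypothetical): register a set of MMIO callbacks and map them at a guest
 * physical address.  As in the unassigned_mem tables above, one callback
 * can be reused for the byte/word/long slots. */
#if 0
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                                 /* device registers read as 0 */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch 'val' into the device state pointed to by 'opaque' */
}

static CPUReadMemoryFunc *example_dev_reads[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};
static CPUWriteMemoryFunc *example_dev_writes[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_dev_init(void *dev_state)
{
    /* io_index 0 asks for a new io zone; the return value is used directly
       as the phys_offset for cpu_register_physical_memory() */
    int io_index = cpu_register_io_memory(0, example_dev_reads,
                                          example_dev_writes, dev_state);
    cpu_register_physical_memory(0xfe000000, 0x1000, io_index);
}
#endif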
2031 /* physical memory access (slow version, mainly for debug) */
2032 #if defined(CONFIG_USER_ONLY)
2033 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2034 int len, int is_write)
2036 int l, flags;
2037 target_ulong page;
2038 void * p;
2040 while (len > 0) {
2041 page = addr & TARGET_PAGE_MASK;
2042 l = (page + TARGET_PAGE_SIZE) - addr;
2043 if (l > len)
2044 l = len;
2045 flags = page_get_flags(page);
2046 if (!(flags & PAGE_VALID))
2047 return;
2048 if (is_write) {
2049 if (!(flags & PAGE_WRITE))
2050 return;
2051 p = lock_user(addr, len, 0);
2052 memcpy(p, buf, len);
2053 unlock_user(p, addr, len);
2054 } else {
2055 if (!(flags & PAGE_READ))
2056 return;
2057 p = lock_user(addr, len, 1);
2058 memcpy(buf, p, len);
2059 unlock_user(p, addr, 0);
2061 len -= l;
2062 buf += l;
2063 addr += l;
2067 #else
2068 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2069 int len, int is_write)
2071 int l, io_index;
2072 uint8_t *ptr;
2073 uint32_t val;
2074 target_phys_addr_t page;
2075 unsigned long pd;
2076 PhysPageDesc *p;
2078 while (len > 0) {
2079 page = addr & TARGET_PAGE_MASK;
2080 l = (page + TARGET_PAGE_SIZE) - addr;
2081 if (l > len)
2082 l = len;
2083 p = phys_page_find(page >> TARGET_PAGE_BITS);
2084 if (!p) {
2085 pd = IO_MEM_UNASSIGNED;
2086 } else {
2087 pd = p->phys_offset;
2088 }
2090 if (is_write) {
2091 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2092 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2093 /* XXX: could force cpu_single_env to NULL to avoid
2094 potential bugs */
2095 if (l >= 4 && ((addr & 3) == 0)) {
2096 /* 32 bit write access */
2097 val = ldl_p(buf);
2098 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2099 l = 4;
2100 } else if (l >= 2 && ((addr & 1) == 0)) {
2101 /* 16 bit write access */
2102 val = lduw_p(buf);
2103 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2104 l = 2;
2105 } else {
2106 /* 8 bit write access */
2107 val = ldub_p(buf);
2108 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2109 l = 1;
2110 }
2111 } else {
2112 unsigned long addr1;
2113 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2114 /* RAM case */
2115 ptr = phys_ram_base + addr1;
2116 memcpy(ptr, buf, l);
2117 if (!cpu_physical_memory_is_dirty(addr1)) {
2118 /* invalidate code */
2119 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2120 /* set dirty bit */
2121 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2122 (0xff & ~CODE_DIRTY_FLAG);
2123 }
2124 }
2125 } else {
2126 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2127 !(pd & IO_MEM_ROMD)) {
2128 /* I/O case */
2129 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2130 if (l >= 4 && ((addr & 3) == 0)) {
2131 /* 32 bit read access */
2132 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2133 stl_p(buf, val);
2134 l = 4;
2135 } else if (l >= 2 && ((addr & 1) == 0)) {
2136 /* 16 bit read access */
2137 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2138 stw_p(buf, val);
2139 l = 2;
2140 } else {
2141 /* 8 bit read access */
2142 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2143 stb_p(buf, val);
2144 l = 1;
2145 }
2146 } else {
2147 /* RAM case */
2148 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2149 (addr & ~TARGET_PAGE_MASK);
2150 memcpy(buf, ptr, l);
2151 }
2152 }
2153 len -= l;
2154 buf += l;
2155 addr += l;
2156 }
2157 }
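/* Illustrative sketch, not part of the original file: bouncing a few bytes
   through the slow path above.  The guest-physical address is a caller-chosen
   value; is_write selects the direction. */
static inline void example_phys_rw(target_phys_addr_t gpa)
{
    uint8_t out[4] = { 0x01, 0x02, 0x03, 0x04 };
    uint8_t in[4];

    cpu_physical_memory_rw(gpa, out, sizeof(out), 1);  /* write to guest */
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0);    /* read it back */
}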
2159 /* used for ROM loading: can write to both RAM and ROM */
2160 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2161 const uint8_t *buf, int len)
2162 {
2163 int l;
2164 uint8_t *ptr;
2165 target_phys_addr_t page;
2166 unsigned long pd;
2167 PhysPageDesc *p;
2169 while (len > 0) {
2170 page = addr & TARGET_PAGE_MASK;
2171 l = (page + TARGET_PAGE_SIZE) - addr;
2172 if (l > len)
2173 l = len;
2174 p = phys_page_find(page >> TARGET_PAGE_BITS);
2175 if (!p) {
2176 pd = IO_MEM_UNASSIGNED;
2177 } else {
2178 pd = p->phys_offset;
2179 }
2181 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2182 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2183 !(pd & IO_MEM_ROMD)) {
2184 /* do nothing */
2185 } else {
2186 unsigned long addr1;
2187 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2188 /* ROM/RAM case */
2189 ptr = phys_ram_base + addr1;
2190 memcpy(ptr, buf, l);
2191 }
2192 len -= l;
2193 buf += l;
2194 addr += l;
2195 }
2196 }
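/* Illustrative sketch, not part of the original file: unlike
   cpu_physical_memory_rw(), the helper above also stores into pages that were
   registered as ROM, which is what firmware/option-ROM loaders need.  The
   signature bytes are just an example payload. */
static inline void example_load_rom(target_phys_addr_t rom_base)
{
    static const uint8_t sig[2] = { 0x55, 0xaa };

    cpu_physical_memory_write_rom(rom_base, sig, sizeof(sig));
}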
2199 /* warning: addr must be aligned */
2200 uint32_t ldl_phys(target_phys_addr_t addr)
2201 {
2202 int io_index;
2203 uint8_t *ptr;
2204 uint32_t val;
2205 unsigned long pd;
2206 PhysPageDesc *p;
2208 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2209 if (!p) {
2210 pd = IO_MEM_UNASSIGNED;
2211 } else {
2212 pd = p->phys_offset;
2213 }
2215 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2216 !(pd & IO_MEM_ROMD)) {
2217 /* I/O case */
2218 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2219 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2220 } else {
2221 /* RAM case */
2222 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2223 (addr & ~TARGET_PAGE_MASK);
2224 val = ldl_p(ptr);
2225 }
2226 return val;
2227 }
2229 /* warning: addr must be aligned */
2230 uint64_t ldq_phys(target_phys_addr_t addr)
2231 {
2232 int io_index;
2233 uint8_t *ptr;
2234 uint64_t val;
2235 unsigned long pd;
2236 PhysPageDesc *p;
2238 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2239 if (!p) {
2240 pd = IO_MEM_UNASSIGNED;
2241 } else {
2242 pd = p->phys_offset;
2243 }
2245 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2246 !(pd & IO_MEM_ROMD)) {
2247 /* I/O case */
2248 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2249 #ifdef TARGET_WORDS_BIGENDIAN
2250 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2251 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2252 #else
2253 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2254 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2255 #endif
2256 } else {
2257 /* RAM case */
2258 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2259 (addr & ~TARGET_PAGE_MASK);
2260 val = ldq_p(ptr);
2261 }
2262 return val;
2263 }
2265 /* XXX: optimize */
2266 uint32_t ldub_phys(target_phys_addr_t addr)
2267 {
2268 uint8_t val;
2269 cpu_physical_memory_read(addr, &val, 1);
2270 return val;
2271 }
2273 /* XXX: optimize */
2274 uint32_t lduw_phys(target_phys_addr_t addr)
2275 {
2276 uint16_t val;
2277 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2278 return tswap16(val);
2279 }
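/* Illustrative sketch, not part of the original file: the ld*_phys helpers
   read guest physical memory directly (or through the registered I/O
   callbacks) and return values in host byte order; gpa must be naturally
   aligned for the 32/64 bit variants, as the warnings above say. */
static inline void example_phys_loads(target_phys_addr_t gpa)
{
    uint32_t b = ldub_phys(gpa);           /* 8 bit */
    uint32_t w = lduw_phys(gpa);           /* 16 bit */
    uint32_t l = ldl_phys(gpa);            /* 32 bit */
    uint64_t q = ldq_phys(gpa);            /* 64 bit */

    (void)b; (void)w; (void)l; (void)q;    /* silence unused warnings */
}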
2281 #ifdef __GNUC__
2282 #define likely(x) __builtin_expect(!!(x), 1)
2283 #define unlikely(x) __builtin_expect(!!(x), 0)
2284 #else
2285 #define likely(x) x
2286 #define unlikely(x) x
2287 #endif
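/* Illustrative sketch, not part of the original file: the macros above feed
   __builtin_expect() so gcc can lay the cold path out of line; a typical use
   is guarding a rare slow path, as stl_phys_notdirty() does below with
   in_migration. */
static inline int example_branch_hint(int rare_condition)
{
    if (unlikely(rare_condition))
        return 1;    /* cold path */
    return 0;        /* hot path */
}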
2289 /* warning: addr must be aligned. The ram page is not marked as dirty
2290 and the code inside it is not invalidated. It is useful if the dirty
2291 bits are used to track modified PTEs */
2292 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2293 {
2294 int io_index;
2295 uint8_t *ptr;
2296 unsigned long pd;
2297 PhysPageDesc *p;
2299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2300 if (!p) {
2301 pd = IO_MEM_UNASSIGNED;
2302 } else {
2303 pd = p->phys_offset;
2304 }
2306 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2307 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2308 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2309 } else {
2310 unsigned long addr1;
2311 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2313 ptr = phys_ram_base + addr1;
2314 stl_p(ptr, val);
2316 if (unlikely(in_migration)) {
2317 if (!cpu_physical_memory_is_dirty(addr1)) {
2318 /* invalidate code */
2319 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2320 /* set dirty bit */
2321 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2322 (0xff & ~CODE_DIRTY_FLAG);
2323 }
2324 }
2325 }
2326 }
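/* Illustrative sketch, not part of the original file: one plausible caller of
   stl_phys_notdirty() is target MMU code updating accessed/dirty bits in a
   guest PTE, where the store must not disturb the dirty bitmap that tracks
   those very pages.  The PTE address and bit value here are hypothetical. */
static inline void example_set_pte(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical "accessed" bit */);
}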
2328 /* warning: addr must be aligned */
2329 void stl_phys(target_phys_addr_t addr, uint32_t val)
2330 {
2331 int io_index;
2332 uint8_t *ptr;
2333 unsigned long pd;
2334 PhysPageDesc *p;
2336 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2337 if (!p) {
2338 pd = IO_MEM_UNASSIGNED;
2339 } else {
2340 pd = p->phys_offset;
2341 }
2343 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2344 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2345 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2346 } else {
2347 unsigned long addr1;
2348 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2349 /* RAM case */
2350 ptr = phys_ram_base + addr1;
2351 stl_p(ptr, val);
2352 if (!cpu_physical_memory_is_dirty(addr1)) {
2353 /* invalidate code */
2354 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2355 /* set dirty bit */
2356 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2357 (0xff & ~CODE_DIRTY_FLAG);
2358 }
2359 }
2360 }
2362 /* XXX: optimize */
2363 void stb_phys(target_phys_addr_t addr, uint32_t val)
2364 {
2365 uint8_t v = val;
2366 cpu_physical_memory_write(addr, &v, 1);
2367 }
2369 /* XXX: optimize */
2370 void stw_phys(target_phys_addr_t addr, uint32_t val)
2371 {
2372 uint16_t v = tswap16(val);
2373 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2374 }
2376 /* XXX: optimize */
2377 void stq_phys(target_phys_addr_t addr, uint64_t val)
2378 {
2379 val = tswap64(val);
2380 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2381 }
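/* Illustrative sketch, not part of the original file: the st*_phys helpers
   are the store-side counterparts of ld*_phys; stl_phys() also keeps the
   dirty bitmap and TB invalidation in sync when the target is RAM. */
static inline void example_phys_stores(target_phys_addr_t gpa)
{
    stb_phys(gpa, 0x12);                      /* 8 bit */
    stw_phys(gpa, 0x1234);                    /* 16 bit */
    stl_phys(gpa, 0x12345678);                /* 32 bit */
    stq_phys(gpa, 0x123456789abcdef0ULL);     /* 64 bit */
}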
2383 #endif
2385 /* virtual memory access for debug */
2386 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2387 uint8_t *buf, int len, int is_write)
2388 {
2389 int l;
2390 target_ulong page, phys_addr;
2392 while (len > 0) {
2393 page = addr & TARGET_PAGE_MASK;
2394 phys_addr = cpu_get_phys_page_debug(env, page);
2395 /* if no physical page mapped, return an error */
2396 if (phys_addr == -1)
2397 return -1;
2398 l = (page + TARGET_PAGE_SIZE) - addr;
2399 if (l > len)
2400 l = len;
2401 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2402 buf, l, is_write);
2403 len -= l;
2404 buf += l;
2405 addr += l;
2406 }
2407 return 0;
2408 }
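/* Illustrative sketch, not part of the original file: reading guest virtual
   memory the way a debugger stub would, translated page by page through
   cpu_get_phys_page_debug(); -1 is propagated when a page is unmapped. */
static inline int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                          uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}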
2410 void dump_exec_info(FILE *f,
2411 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2412 {
2413 int i, target_code_size, max_target_code_size;
2414 int direct_jmp_count, direct_jmp2_count, cross_page;
2415 TranslationBlock *tb;
2417 target_code_size = 0;
2418 max_target_code_size = 0;
2419 cross_page = 0;
2420 direct_jmp_count = 0;
2421 direct_jmp2_count = 0;
2422 for(i = 0; i < nb_tbs; i++) {
2423 tb = &tbs[i];
2424 target_code_size += tb->size;
2425 if (tb->size > max_target_code_size)
2426 max_target_code_size = tb->size;
2427 if (tb->page_addr[1] != -1)
2428 cross_page++;
2429 if (tb->tb_next_offset[0] != 0xffff) {
2430 direct_jmp_count++;
2431 if (tb->tb_next_offset[1] != 0xffff) {
2432 direct_jmp2_count++;
2433 }
2434 }
2435 }
2436 /* XXX: avoid using doubles ? */
2437 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2438 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2439 nb_tbs ? target_code_size / nb_tbs : 0,
2440 max_target_code_size);
2441 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2442 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2443 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2444 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2445 cross_page,
2446 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2447 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2448 direct_jmp_count,
2449 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2450 direct_jmp2_count,
2451 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2452 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2453 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2454 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
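/* Illustrative sketch, not part of the original file: dump_exec_info() only
   needs an fprintf-like callback, so the TB statistics can also be sent to
   stderr outside the monitor. */
static inline void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}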
2457 #if !defined(CONFIG_USER_ONLY)
2459 #define MMUSUFFIX _cmmu
2460 #define GETPC() NULL
2461 #define env cpu_single_env
2462 #define SOFTMMU_CODE_ACCESS
2464 #define SHIFT 0
2465 #include "softmmu_template.h"
2467 #define SHIFT 1
2468 #include "softmmu_template.h"
2470 #define SHIFT 2
2471 #include "softmmu_template.h"
2473 #define SHIFT 3
2474 #include "softmmu_template.h"
2476 #undef env
2478 #endif