1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
52 #undef DEBUG_TB_CHECK
53 #endif
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
67 #else
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
70 #endif
72 #ifdef USE_KVM
73 extern int kvm_allowed;
74 #endif
76 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
77 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78 int nb_tbs;
79 /* any access to the tbs or the page table must use this lock */
80 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
82 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
83 uint8_t *code_gen_ptr;
85 int phys_ram_size;
86 int phys_ram_fd;
87 uint8_t *phys_ram_base;
88 uint8_t *phys_ram_dirty;
89 uint8_t *bios_mem;
90 static int in_migration;
92 CPUState *first_cpu;
93 /* current CPU in the current thread. It is only valid inside
94 cpu_exec() */
95 CPUState *cpu_single_env;
97 typedef struct PageDesc {
98 /* list of TBs intersecting this ram page */
99 TranslationBlock *first_tb;
100 /* in order to optimize self modifying code, we count the number
101 of lookups we do to a given page to use a bitmap */
102 unsigned int code_write_count;
103 uint8_t *code_bitmap;
104 #if defined(CONFIG_USER_ONLY)
105 unsigned long flags;
106 #endif
107 } PageDesc;
109 typedef struct PhysPageDesc {
110 /* offset in host memory of the page + io_index in the low 12 bits */
111 uint32_t phys_offset;
112 } PhysPageDesc;
114 #define L2_BITS 10
115 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
117 #define L1_SIZE (1 << L1_BITS)
118 #define L2_SIZE (1 << L2_BITS)
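/* The page descriptors are kept in a two-level table: the top L1_BITS
   of a target page index select an entry in l1_map[], which points to an
   L2 array of L2_SIZE PageDesc entries selected by the low L2_BITS.
   page_find_alloc() below splits an index as
       l1_map[index >> L2_BITS] + (index & (L2_SIZE - 1))
   and allocates the L2 array lazily on first use. */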
120 static void io_mem_init(void);
122 unsigned long qemu_real_host_page_size;
123 unsigned long qemu_host_page_bits;
124 unsigned long qemu_host_page_size;
125 unsigned long qemu_host_page_mask;
127 /* XXX: for system emulation, it could just be an array */
128 static PageDesc *l1_map[L1_SIZE];
129 PhysPageDesc **l1_phys_map;
131 /* io memory support */
132 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
133 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
134 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
135 static int io_mem_nb;
137 /* log support */
138 char *logfilename = "/tmp/qemu.log";
139 FILE *logfile;
140 int loglevel;
142 /* statistics */
143 static int tlb_flush_count;
144 static int tb_flush_count;
145 static int tb_phys_invalidate_count;
147 static void page_init(void)
149 /* NOTE: we can always suppose that qemu_host_page_size >=
150 TARGET_PAGE_SIZE */
151 #ifdef _WIN32
153 SYSTEM_INFO system_info;
154 DWORD old_protect;
156 GetSystemInfo(&system_info);
157 qemu_real_host_page_size = system_info.dwPageSize;
159 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
160 PAGE_EXECUTE_READWRITE, &old_protect);
162 #else
163 qemu_real_host_page_size = getpagesize();
165 unsigned long start, end;
167 start = (unsigned long)code_gen_buffer;
168 start &= ~(qemu_real_host_page_size - 1);
170 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
171 end += qemu_real_host_page_size - 1;
172 end &= ~(qemu_real_host_page_size - 1);
174 mprotect((void *)start, end - start,
175 PROT_READ | PROT_WRITE | PROT_EXEC);
177 #endif
179 if (qemu_host_page_size == 0)
180 qemu_host_page_size = qemu_real_host_page_size;
181 if (qemu_host_page_size < TARGET_PAGE_SIZE)
182 qemu_host_page_size = TARGET_PAGE_SIZE;
183 qemu_host_page_bits = 0;
184 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
185 qemu_host_page_bits++;
186 qemu_host_page_mask = ~(qemu_host_page_size - 1);
187 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
188 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
191 static inline PageDesc *page_find_alloc(unsigned int index)
193 PageDesc **lp, *p;
195 lp = &l1_map[index >> L2_BITS];
196 p = *lp;
197 if (!p) {
198 /* allocate if not found */
199 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
200 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
201 *lp = p;
203 return p + (index & (L2_SIZE - 1));
206 static inline PageDesc *page_find(unsigned int index)
208 PageDesc *p;
210 p = l1_map[index >> L2_BITS];
211 if (!p)
212 return 0;
213 return p + (index & (L2_SIZE - 1));
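/* The physical page table follows the same two-level scheme, but uses
   l1_phys_map and PhysPageDesc entries.  When TARGET_PHYS_ADDR_SPACE_BITS
   is larger than 32, an extra indirection level is inserted so that the
   whole physical address space can be covered.  Entries default to
   IO_MEM_UNASSIGNED, and phys_page_find_alloc() only allocates the
   intermediate tables when 'alloc' is non-zero. */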
216 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
218 void **lp, **p;
219 PhysPageDesc *pd;
221 p = (void **)l1_phys_map;
222 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
224 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
225 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
226 #endif
227 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
228 p = *lp;
229 if (!p) {
230 /* allocate if not found */
231 if (!alloc)
232 return NULL;
233 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
234 memset(p, 0, sizeof(void *) * L1_SIZE);
235 *lp = p;
237 #endif
238 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
239 pd = *lp;
240 if (!pd) {
241 int i;
242 /* allocate if not found */
243 if (!alloc)
244 return NULL;
245 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
246 *lp = pd;
247 for (i = 0; i < L2_SIZE; i++)
248 pd[i].phys_offset = IO_MEM_UNASSIGNED;
250 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
253 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
255 return phys_page_find_alloc(index, 0);
258 #if !defined(CONFIG_USER_ONLY)
259 static void tlb_protect_code(ram_addr_t ram_addr);
260 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
261 target_ulong vaddr);
262 #endif
264 void cpu_exec_init(CPUState *env)
266 CPUState **penv;
267 int cpu_index;
269 if (!code_gen_ptr) {
270 code_gen_ptr = code_gen_buffer;
271 page_init();
272 io_mem_init();
274 env->next_cpu = NULL;
275 penv = &first_cpu;
276 cpu_index = 0;
277 while (*penv != NULL) {
278 penv = (CPUState **)&(*penv)->next_cpu;
279 cpu_index++;
281 env->cpu_index = cpu_index;
282 *penv = env;
285 static inline void invalidate_page_bitmap(PageDesc *p)
287 if (p->code_bitmap) {
288 qemu_free(p->code_bitmap);
289 p->code_bitmap = NULL;
291 p->code_write_count = 0;
294 /* set to NULL all the 'first_tb' fields in all PageDescs */
295 static void page_flush_tb(void)
297 int i, j;
298 PageDesc *p;
300 for(i = 0; i < L1_SIZE; i++) {
301 p = l1_map[i];
302 if (p) {
303 for(j = 0; j < L2_SIZE; j++) {
304 p->first_tb = NULL;
305 invalidate_page_bitmap(p);
306 p++;
312 /* flush all the translation blocks */
313 /* XXX: tb_flush is currently not thread safe */
314 void tb_flush(CPUState *env1)
316 CPUState *env;
317 #if defined(DEBUG_FLUSH)
318 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
319 code_gen_ptr - code_gen_buffer,
320 nb_tbs,
321 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
322 #endif
323 nb_tbs = 0;
325 for(env = first_cpu; env != NULL; env = env->next_cpu) {
326 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
329 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
330 page_flush_tb();
332 code_gen_ptr = code_gen_buffer;
333 /* XXX: flush processor icache at this point if cache flush is
334 expensive */
335 tb_flush_count++;
338 #ifdef DEBUG_TB_CHECK
340 static void tb_invalidate_check(unsigned long address)
342 TranslationBlock *tb;
343 int i;
344 address &= TARGET_PAGE_MASK;
345 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
346 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
347 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
348 address >= tb->pc + tb->size)) {
349 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
350 address, (long)tb->pc, tb->size);
356 /* verify that all the pages have correct rights for code */
357 static void tb_page_check(void)
359 TranslationBlock *tb;
360 int i, flags1, flags2;
362 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
363 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
364 flags1 = page_get_flags(tb->pc);
365 flags2 = page_get_flags(tb->pc + tb->size - 1);
366 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
367 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
368 (long)tb->pc, tb->size, flags1, flags2);
374 void tb_jmp_check(TranslationBlock *tb)
376 TranslationBlock *tb1;
377 unsigned int n1;
379 /* suppress any remaining jumps to this TB */
380 tb1 = tb->jmp_first;
381 for(;;) {
382 n1 = (long)tb1 & 3;
383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
384 if (n1 == 2)
385 break;
386 tb1 = tb1->jmp_next[n1];
388 /* check end of list */
389 if (tb1 != tb) {
390 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
394 #endif
396 /* invalidate one TB */
397 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
398 int next_offset)
400 TranslationBlock *tb1;
401 for(;;) {
402 tb1 = *ptb;
403 if (tb1 == tb) {
404 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
405 break;
407 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
411 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
413 TranslationBlock *tb1;
414 unsigned int n1;
416 for(;;) {
417 tb1 = *ptb;
418 n1 = (long)tb1 & 3;
419 tb1 = (TranslationBlock *)((long)tb1 & ~3);
420 if (tb1 == tb) {
421 *ptb = tb1->page_next[n1];
422 break;
424 ptb = &tb1->page_next[n1];
428 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
430 TranslationBlock *tb1, **ptb;
431 unsigned int n1;
433 ptb = &tb->jmp_next[n];
434 tb1 = *ptb;
435 if (tb1) {
436 /* find tb(n) in circular list */
437 for(;;) {
438 tb1 = *ptb;
439 n1 = (long)tb1 & 3;
440 tb1 = (TranslationBlock *)((long)tb1 & ~3);
441 if (n1 == n && tb1 == tb)
442 break;
443 if (n1 == 2) {
444 ptb = &tb1->jmp_first;
445 } else {
446 ptb = &tb1->jmp_next[n1];
449 /* now we can suppress tb(n) from the list */
450 *ptb = tb->jmp_next[n];
452 tb->jmp_next[n] = NULL;
456 /* reset the jump entry 'n' of a TB so that it is not chained to
457 another TB */
458 static inline void tb_reset_jump(TranslationBlock *tb, int n)
460 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
463 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
465 CPUState *env;
466 PageDesc *p;
467 unsigned int h, n1;
468 target_ulong phys_pc;
469 TranslationBlock *tb1, *tb2;
471 /* remove the TB from the hash list */
472 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
473 h = tb_phys_hash_func(phys_pc);
474 tb_remove(&tb_phys_hash[h], tb,
475 offsetof(TranslationBlock, phys_hash_next));
477 /* remove the TB from the page list */
478 if (tb->page_addr[0] != page_addr) {
479 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
480 tb_page_remove(&p->first_tb, tb);
481 invalidate_page_bitmap(p);
483 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
484 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
485 tb_page_remove(&p->first_tb, tb);
486 invalidate_page_bitmap(p);
489 tb_invalidated_flag = 1;
491     /* remove the TB from the per-CPU tb_jmp_cache */
492 h = tb_jmp_cache_hash_func(tb->pc);
493 for(env = first_cpu; env != NULL; env = env->next_cpu) {
494 if (env->tb_jmp_cache[h] == tb)
495 env->tb_jmp_cache[h] = NULL;
498 /* suppress this TB from the two jump lists */
499 tb_jmp_remove(tb, 0);
500 tb_jmp_remove(tb, 1);
502 /* suppress any remaining jumps to this TB */
503 tb1 = tb->jmp_first;
504 for(;;) {
505 n1 = (long)tb1 & 3;
506 if (n1 == 2)
507 break;
508 tb1 = (TranslationBlock *)((long)tb1 & ~3);
509 tb2 = tb1->jmp_next[n1];
510 tb_reset_jump(tb1, n1);
511 tb1->jmp_next[n1] = NULL;
512 tb1 = tb2;
514 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
516 tb_phys_invalidate_count++;
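/* set_bits() marks the bit range [start, start + len) in the byte array
   'tab' (bit i of the bitmap lives in tab[i >> 3], position i & 7).
   For example, set_bits(bitmap, 3, 10) sets bits 3..12, touching the
   first two bytes of the bitmap. */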
519 static inline void set_bits(uint8_t *tab, int start, int len)
521 int end, mask, end1;
523 end = start + len;
524 tab += start >> 3;
525 mask = 0xff << (start & 7);
526 if ((start & ~7) == (end & ~7)) {
527 if (start < end) {
528 mask &= ~(0xff << (end & 7));
529 *tab |= mask;
531 } else {
532 *tab++ |= mask;
533 start = (start + 8) & ~7;
534 end1 = end & ~7;
535 while (start < end1) {
536 *tab++ = 0xff;
537 start += 8;
539 if (start < end) {
540 mask = ~(0xff << (end & 7));
541 *tab |= mask;
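/* build_page_bitmap() records, with one bit per byte of the guest page,
   which bytes are covered by at least one TB.  The bitmap is only built
   once a page has taken SMC_BITMAP_USE_THRESHOLD CPU write faults (see
   tb_invalidate_phys_page_range below); after that,
   tb_invalidate_phys_page_fast() can cheaply skip writes that do not
   touch translated code. */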
546 static void build_page_bitmap(PageDesc *p)
548 int n, tb_start, tb_end;
549 TranslationBlock *tb;
551 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
552 if (!p->code_bitmap)
553 return;
554 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
556 tb = p->first_tb;
557 while (tb != NULL) {
558 n = (long)tb & 3;
559 tb = (TranslationBlock *)((long)tb & ~3);
560 /* NOTE: this is subtle as a TB may span two physical pages */
561 if (n == 0) {
562 /* NOTE: tb_end may be after the end of the page, but
563 it is not a problem */
564 tb_start = tb->pc & ~TARGET_PAGE_MASK;
565 tb_end = tb_start + tb->size;
566 if (tb_end > TARGET_PAGE_SIZE)
567 tb_end = TARGET_PAGE_SIZE;
568 } else {
569 tb_start = 0;
570 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
572 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
573 tb = tb->page_next[n];
577 #ifdef TARGET_HAS_PRECISE_SMC
579 static void tb_gen_code(CPUState *env,
580 target_ulong pc, target_ulong cs_base, int flags,
581 int cflags)
583 TranslationBlock *tb;
584 uint8_t *tc_ptr;
585 target_ulong phys_pc, phys_page2, virt_page2;
586 int code_gen_size;
588 phys_pc = get_phys_addr_code(env, pc);
589 tb = tb_alloc(pc);
590 if (!tb) {
591 /* flush must be done */
592 tb_flush(env);
593 /* cannot fail at this point */
594 tb = tb_alloc(pc);
596 tc_ptr = code_gen_ptr;
597 tb->tc_ptr = tc_ptr;
598 tb->cs_base = cs_base;
599 tb->flags = flags;
600 tb->cflags = cflags;
601 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
602 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
604 /* check next page if needed */
605 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
606 phys_page2 = -1;
607 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
608 phys_page2 = get_phys_addr_code(env, virt_page2);
610 tb_link_phys(tb, phys_pc, phys_page2);
612 #endif
614 /* invalidate all TBs which intersect with the target physical page
615    starting in range [start, end). NOTE: start and end must refer to
616 the same physical page. 'is_cpu_write_access' should be true if called
617 from a real cpu write access: the virtual CPU will exit the current
618 TB if code is modified inside this TB. */
619 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
620 int is_cpu_write_access)
622 int n, current_tb_modified, current_tb_not_found, current_flags;
623 CPUState *env = cpu_single_env;
624 PageDesc *p;
625 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
626 target_ulong tb_start, tb_end;
627 target_ulong current_pc, current_cs_base;
629 p = page_find(start >> TARGET_PAGE_BITS);
630 if (!p)
631 return;
632 if (!p->code_bitmap &&
633 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
634 is_cpu_write_access) {
635 /* build code bitmap */
636 build_page_bitmap(p);
639     /* we remove all the TBs in the range [start, end) */
640 /* XXX: see if in some cases it could be faster to invalidate all the code */
641 current_tb_not_found = is_cpu_write_access;
642 current_tb_modified = 0;
643 current_tb = NULL; /* avoid warning */
644 current_pc = 0; /* avoid warning */
645 current_cs_base = 0; /* avoid warning */
646 current_flags = 0; /* avoid warning */
647 tb = p->first_tb;
648 while (tb != NULL) {
649 n = (long)tb & 3;
650 tb = (TranslationBlock *)((long)tb & ~3);
651 tb_next = tb->page_next[n];
652 /* NOTE: this is subtle as a TB may span two physical pages */
653 if (n == 0) {
654 /* NOTE: tb_end may be after the end of the page, but
655 it is not a problem */
656 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
657 tb_end = tb_start + tb->size;
658 } else {
659 tb_start = tb->page_addr[1];
660 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
662 if (!(tb_end <= start || tb_start >= end)) {
663 #ifdef TARGET_HAS_PRECISE_SMC
664 if (current_tb_not_found) {
665 current_tb_not_found = 0;
666 current_tb = NULL;
667 if (env->mem_write_pc) {
668 /* now we have a real cpu fault */
669 current_tb = tb_find_pc(env->mem_write_pc);
672 if (current_tb == tb &&
673 !(current_tb->cflags & CF_SINGLE_INSN)) {
674 /* If we are modifying the current TB, we must stop
675 its execution. We could be more precise by checking
676 that the modification is after the current PC, but it
677 would require a specialized function to partially
678 restore the CPU state */
680 current_tb_modified = 1;
681 cpu_restore_state(current_tb, env,
682 env->mem_write_pc, NULL);
683 #if defined(TARGET_I386)
684 current_flags = env->hflags;
685 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
686 current_cs_base = (target_ulong)env->segs[R_CS].base;
687 current_pc = current_cs_base + env->eip;
688 #else
689 #error unsupported CPU
690 #endif
692 #endif /* TARGET_HAS_PRECISE_SMC */
693 /* we need to do that to handle the case where a signal
694 occurs while doing tb_phys_invalidate() */
695 saved_tb = NULL;
696 if (env) {
697 saved_tb = env->current_tb;
698 env->current_tb = NULL;
700 tb_phys_invalidate(tb, -1);
701 if (env) {
702 env->current_tb = saved_tb;
703 if (env->interrupt_request && env->current_tb)
704 cpu_interrupt(env, env->interrupt_request);
707 tb = tb_next;
709 #if !defined(CONFIG_USER_ONLY)
710 /* if no code remaining, no need to continue to use slow writes */
711 if (!p->first_tb) {
712 invalidate_page_bitmap(p);
713 if (is_cpu_write_access) {
714 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
717 #endif
718 #ifdef TARGET_HAS_PRECISE_SMC
719 if (current_tb_modified) {
720 /* we generate a block containing just the instruction
721 modifying the memory. It will ensure that it cannot modify
722 itself */
723 env->current_tb = NULL;
724 tb_gen_code(env, current_pc, current_cs_base, current_flags,
725 CF_SINGLE_INSN);
726 cpu_resume_from_signal(env, NULL);
728 #endif
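/* Fast path used by the notdirty write handlers below: for small,
   aligned writes the code bitmap is consulted first when it exists, and
   the expensive range invalidation above is skipped unless the written
   bytes actually overlap translated code; without a bitmap the range is
   always invalidated. */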
731 /* len must be <= 8 and start must be a multiple of len */
732 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
734 PageDesc *p;
735 int offset, b;
736 #if 0
737 if (1) {
738 if (loglevel) {
739 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
740 cpu_single_env->mem_write_vaddr, len,
741 cpu_single_env->eip,
742 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
745 #endif
746 p = page_find(start >> TARGET_PAGE_BITS);
747 if (!p)
748 return;
749 if (p->code_bitmap) {
750 offset = start & ~TARGET_PAGE_MASK;
751 b = p->code_bitmap[offset >> 3] >> (offset & 7);
752 if (b & ((1 << len) - 1))
753 goto do_invalidate;
754 } else {
755 do_invalidate:
756 tb_invalidate_phys_page_range(start, start + len, 1);
760 #if !defined(CONFIG_SOFTMMU)
761 static void tb_invalidate_phys_page(target_ulong addr,
762 unsigned long pc, void *puc)
764 int n, current_flags, current_tb_modified;
765 target_ulong current_pc, current_cs_base;
766 PageDesc *p;
767 TranslationBlock *tb, *current_tb;
768 #ifdef TARGET_HAS_PRECISE_SMC
769 CPUState *env = cpu_single_env;
770 #endif
772 addr &= TARGET_PAGE_MASK;
773 p = page_find(addr >> TARGET_PAGE_BITS);
774 if (!p)
775 return;
776 tb = p->first_tb;
777 current_tb_modified = 0;
778 current_tb = NULL;
779 current_pc = 0; /* avoid warning */
780 current_cs_base = 0; /* avoid warning */
781 current_flags = 0; /* avoid warning */
782 #ifdef TARGET_HAS_PRECISE_SMC
783 if (tb && pc != 0) {
784 current_tb = tb_find_pc(pc);
786 #endif
787 while (tb != NULL) {
788 n = (long)tb & 3;
789 tb = (TranslationBlock *)((long)tb & ~3);
790 #ifdef TARGET_HAS_PRECISE_SMC
791 if (current_tb == tb &&
792 !(current_tb->cflags & CF_SINGLE_INSN)) {
793 /* If we are modifying the current TB, we must stop
794 its execution. We could be more precise by checking
795 that the modification is after the current PC, but it
796 would require a specialized function to partially
797 restore the CPU state */
799 current_tb_modified = 1;
800 cpu_restore_state(current_tb, env, pc, puc);
801 #if defined(TARGET_I386)
802 current_flags = env->hflags;
803 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
804 current_cs_base = (target_ulong)env->segs[R_CS].base;
805 current_pc = current_cs_base + env->eip;
806 #else
807 #error unsupported CPU
808 #endif
810 #endif /* TARGET_HAS_PRECISE_SMC */
811 tb_phys_invalidate(tb, addr);
812 tb = tb->page_next[n];
814 p->first_tb = NULL;
815 #ifdef TARGET_HAS_PRECISE_SMC
816 if (current_tb_modified) {
817 /* we generate a block containing just the instruction
818 modifying the memory. It will ensure that it cannot modify
819 itself */
820 env->current_tb = NULL;
821 tb_gen_code(env, current_pc, current_cs_base, current_flags,
822 CF_SINGLE_INSN);
823 cpu_resume_from_signal(env, puc);
825 #endif
827 #endif
829 /* add the tb in the target page and protect it if necessary */
830 static inline void tb_alloc_page(TranslationBlock *tb,
831 unsigned int n, target_ulong page_addr)
833 PageDesc *p;
834 TranslationBlock *last_first_tb;
836 tb->page_addr[n] = page_addr;
837 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
838 tb->page_next[n] = p->first_tb;
839 last_first_tb = p->first_tb;
840 p->first_tb = (TranslationBlock *)((long)tb | n);
841 invalidate_page_bitmap(p);
843 #if defined(TARGET_HAS_SMC) || 1
845 #if defined(CONFIG_USER_ONLY)
846 if (p->flags & PAGE_WRITE) {
847 target_ulong addr;
848 PageDesc *p2;
849 int prot;
851 /* force the host page as non writable (writes will have a
852 page fault + mprotect overhead) */
853 page_addr &= qemu_host_page_mask;
854 prot = 0;
855 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
856 addr += TARGET_PAGE_SIZE) {
858 p2 = page_find (addr >> TARGET_PAGE_BITS);
859 if (!p2)
860 continue;
861 prot |= p2->flags;
862 p2->flags &= ~PAGE_WRITE;
863 page_get_flags(addr);
865 mprotect(g2h(page_addr), qemu_host_page_size,
866 (prot & PAGE_BITS) & ~PAGE_WRITE);
867 #ifdef DEBUG_TB_INVALIDATE
868 printf("protecting code page: 0x%08lx\n",
869 page_addr);
870 #endif
872 #else
873 /* if some code is already present, then the pages are already
874 protected. So we handle the case where only the first TB is
875 allocated in a physical page */
876 if (!last_first_tb) {
877 tlb_protect_code(page_addr);
879 #endif
881 #endif /* TARGET_HAS_SMC */
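/* Two strategies are used above to detect self-modifying code: in user
   mode the host page containing the TB is write-protected with
   mprotect(), so a guest write faults into page_unprotect(); with the
   soft MMU, tlb_protect_code() clears the CODE_DIRTY_FLAG so that
   subsequent writes go through the IO_MEM_NOTDIRTY slow path instead. */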
884 /* Allocate a new translation block. Flush the translation buffer if
885 too many translation blocks or too much generated code. */
886 TranslationBlock *tb_alloc(target_ulong pc)
888 TranslationBlock *tb;
890 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
891 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
892 return NULL;
893 tb = &tbs[nb_tbs++];
894 tb->pc = pc;
895 tb->cflags = 0;
896 return tb;
899 /* add a new TB and link it to the physical page tables. phys_page2 is
900 (-1) to indicate that only one page contains the TB. */
901 void tb_link_phys(TranslationBlock *tb,
902 target_ulong phys_pc, target_ulong phys_page2)
904 unsigned int h;
905 TranslationBlock **ptb;
907 /* add in the physical hash table */
908 h = tb_phys_hash_func(phys_pc);
909 ptb = &tb_phys_hash[h];
910 tb->phys_hash_next = *ptb;
911 *ptb = tb;
913 /* add in the page list */
914 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
915 if (phys_page2 != -1)
916 tb_alloc_page(tb, 1, phys_page2);
917 else
918 tb->page_addr[1] = -1;
920 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
921 tb->jmp_next[0] = NULL;
922 tb->jmp_next[1] = NULL;
923 #ifdef USE_CODE_COPY
924 tb->cflags &= ~CF_FP_USED;
925 if (tb->cflags & CF_TB_FP_USED)
926 tb->cflags |= CF_FP_USED;
927 #endif
929 /* init original jump addresses */
930 if (tb->tb_next_offset[0] != 0xffff)
931 tb_reset_jump(tb, 0);
932 if (tb->tb_next_offset[1] != 0xffff)
933 tb_reset_jump(tb, 1);
935 #ifdef DEBUG_TB_CHECK
936 tb_page_check();
937 #endif
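/* Note on tb_find_pc(): the binary search below relies on tbs[] being
   ordered by tc_ptr, which holds because translated code is carved
   sequentially out of code_gen_buffer and both nb_tbs and code_gen_ptr
   are reset together in tb_flush(). */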
940 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
941 tb[1].tc_ptr. Return NULL if not found */
942 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
944 int m_min, m_max, m;
945 unsigned long v;
946 TranslationBlock *tb;
948 if (nb_tbs <= 0)
949 return NULL;
950 if (tc_ptr < (unsigned long)code_gen_buffer ||
951 tc_ptr >= (unsigned long)code_gen_ptr)
952 return NULL;
953 /* binary search (cf Knuth) */
954 m_min = 0;
955 m_max = nb_tbs - 1;
956 while (m_min <= m_max) {
957 m = (m_min + m_max) >> 1;
958 tb = &tbs[m];
959 v = (unsigned long)tb->tc_ptr;
960 if (v == tc_ptr)
961 return tb;
962 else if (tc_ptr < v) {
963 m_max = m - 1;
964 } else {
965 m_min = m + 1;
968 return &tbs[m_max];
971 static void tb_reset_jump_recursive(TranslationBlock *tb);
973 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
975 TranslationBlock *tb1, *tb_next, **ptb;
976 unsigned int n1;
978 tb1 = tb->jmp_next[n];
979 if (tb1 != NULL) {
980 /* find head of list */
981 for(;;) {
982 n1 = (long)tb1 & 3;
983 tb1 = (TranslationBlock *)((long)tb1 & ~3);
984 if (n1 == 2)
985 break;
986 tb1 = tb1->jmp_next[n1];
988         /* we are now sure that tb jumps to tb1 */
989 tb_next = tb1;
991 /* remove tb from the jmp_first list */
992 ptb = &tb_next->jmp_first;
993 for(;;) {
994 tb1 = *ptb;
995 n1 = (long)tb1 & 3;
996 tb1 = (TranslationBlock *)((long)tb1 & ~3);
997 if (n1 == n && tb1 == tb)
998 break;
999 ptb = &tb1->jmp_next[n1];
1001 *ptb = tb->jmp_next[n];
1002 tb->jmp_next[n] = NULL;
1004 /* suppress the jump to next tb in generated code */
1005 tb_reset_jump(tb, n);
1007 /* suppress jumps in the tb on which we could have jumped */
1008 tb_reset_jump_recursive(tb_next);
1012 static void tb_reset_jump_recursive(TranslationBlock *tb)
1014 tb_reset_jump_recursive2(tb, 0);
1015 tb_reset_jump_recursive2(tb, 1);
1018 #if defined(TARGET_HAS_ICE)
1019 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1021 target_ulong addr, pd;
1022 ram_addr_t ram_addr;
1023 PhysPageDesc *p;
1025 addr = cpu_get_phys_page_debug(env, pc);
1026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1027 if (!p) {
1028 pd = IO_MEM_UNASSIGNED;
1029 } else {
1030 pd = p->phys_offset;
1032 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1033 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1035 #endif
1037 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1038 breakpoint is reached */
1039 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1041 #if defined(TARGET_HAS_ICE)
1042 int i;
1044 for(i = 0; i < env->nb_breakpoints; i++) {
1045 if (env->breakpoints[i] == pc)
1046 return 0;
1049 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1050 return -1;
1051 env->breakpoints[env->nb_breakpoints++] = pc;
1053 #ifdef USE_KVM
1054 if (kvm_allowed)
1055 kvm_update_debugger(env);
1056 #endif
1058 breakpoint_invalidate(env, pc);
1059 return 0;
1060 #else
1061 return -1;
1062 #endif
1065 /* remove a breakpoint */
1066 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1068 #if defined(TARGET_HAS_ICE)
1069 int i;
1070 for(i = 0; i < env->nb_breakpoints; i++) {
1071 if (env->breakpoints[i] == pc)
1072 goto found;
1074 return -1;
1075 found:
1076 env->nb_breakpoints--;
1077 if (i < env->nb_breakpoints)
1078 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1080 #ifdef USE_KVM
1081 if (kvm_allowed)
1082 kvm_update_debugger(env);
1083 #endif
1085 breakpoint_invalidate(env, pc);
1086 return 0;
1087 #else
1088 return -1;
1089 #endif
1092 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1093 CPU loop after each instruction */
1094 void cpu_single_step(CPUState *env, int enabled)
1096 #if defined(TARGET_HAS_ICE)
1097 if (env->singlestep_enabled != enabled) {
1098 env->singlestep_enabled = enabled;
1099         /* must flush all the translated code to avoid inconsistencies */
1100 /* XXX: only flush what is necessary */
1101 tb_flush(env);
1103 #ifdef USE_KVM
1104 if (kvm_allowed)
1105 kvm_update_debugger(env);
1106 #endif
1107 #endif
1110 /* enable or disable low levels log */
1111 void cpu_set_log(int log_flags)
1113 loglevel = log_flags;
1114 if (loglevel && !logfile) {
1115 logfile = fopen(logfilename, "w");
1116 if (!logfile) {
1117 perror(logfilename);
1118 _exit(1);
1120 #if !defined(CONFIG_SOFTMMU)
1121 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1123 static uint8_t logfile_buf[4096];
1124 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1126 #else
1127 setvbuf(logfile, NULL, _IOLBF, 0);
1128 #endif
1132 void cpu_set_log_filename(const char *filename)
1134 logfilename = strdup(filename);
1137 /* mask must never be zero, except for A20 change call */
1138 void cpu_interrupt(CPUState *env, int mask)
1140 TranslationBlock *tb;
1141 static int interrupt_lock;
1143 env->interrupt_request |= mask;
1144 /* if the cpu is currently executing code, we must unlink it and
1145 all the potentially executing TB */
1146 tb = env->current_tb;
1147 if (tb && !testandset(&interrupt_lock)) {
1148 env->current_tb = NULL;
1149 tb_reset_jump_recursive(tb);
1150 interrupt_lock = 0;
1154 void cpu_reset_interrupt(CPUState *env, int mask)
1156 env->interrupt_request &= ~mask;
1159 CPULogItem cpu_log_items[] = {
1160 { CPU_LOG_TB_OUT_ASM, "out_asm",
1161 "show generated host assembly code for each compiled TB" },
1162 { CPU_LOG_TB_IN_ASM, "in_asm",
1163 "show target assembly code for each compiled TB" },
1164 { CPU_LOG_TB_OP, "op",
1165 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1166 #ifdef TARGET_I386
1167 { CPU_LOG_TB_OP_OPT, "op_opt",
1168 "show micro ops after optimization for each compiled TB" },
1169 #endif
1170 { CPU_LOG_INT, "int",
1171 "show interrupts/exceptions in short format" },
1172 { CPU_LOG_EXEC, "exec",
1173 "show trace before each executed TB (lots of logs)" },
1174 { CPU_LOG_TB_CPU, "cpu",
1175 "show CPU state before bloc translation" },
1176 #ifdef TARGET_I386
1177 { CPU_LOG_PCALL, "pcall",
1178 "show protected mode far calls/returns/exceptions" },
1179 #endif
1180 #ifdef DEBUG_IOPORT
1181 { CPU_LOG_IOPORT, "ioport",
1182 "show all i/o ports accesses" },
1183 #endif
1184 { 0, NULL, NULL },
1187 static int cmp1(const char *s1, int n, const char *s2)
1189 if (strlen(s2) != n)
1190 return 0;
1191 return memcmp(s1, s2, n) == 0;
1194 /* takes a comma separated list of log masks. Return 0 if error. */
1195 int cpu_str_to_log_mask(const char *str)
1197 CPULogItem *item;
1198 int mask;
1199 const char *p, *p1;
1201 p = str;
1202 mask = 0;
1203 for(;;) {
1204 p1 = strchr(p, ',');
1205 if (!p1)
1206 p1 = p + strlen(p);
1207 if(cmp1(p,p1-p,"all")) {
1208 for(item = cpu_log_items; item->mask != 0; item++) {
1209 mask |= item->mask;
1211 } else {
1212 for(item = cpu_log_items; item->mask != 0; item++) {
1213 if (cmp1(p, p1 - p, item->name))
1214 goto found;
1216 return 0;
1218 found:
1219 mask |= item->mask;
1220 if (*p1 != ',')
1221 break;
1222 p = p1 + 1;
1224 return mask;
1227 void cpu_abort(CPUState *env, const char *fmt, ...)
1229 va_list ap;
1231 va_start(ap, fmt);
1232 fprintf(stderr, "qemu: fatal: ");
1233 vfprintf(stderr, fmt, ap);
1234 fprintf(stderr, "\n");
1235 #ifdef TARGET_I386
1236 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1237 #else
1238 cpu_dump_state(env, stderr, fprintf, 0);
1239 #endif
1240 va_end(ap);
1241 abort();
1244 #if !defined(CONFIG_USER_ONLY)
1246 /* NOTE: if flush_global is true, also flush global entries (not
1247 implemented yet) */
1248 void tlb_flush(CPUState *env, int flush_global)
1250 int i;
1252 #if defined(DEBUG_TLB)
1253 printf("tlb_flush:\n");
1254 #endif
1255 /* must reset current TB so that interrupts cannot modify the
1256 links while we are modifying them */
1257 env->current_tb = NULL;
1259 for(i = 0; i < CPU_TLB_SIZE; i++) {
1260 env->tlb_table[0][i].addr_read = -1;
1261 env->tlb_table[0][i].addr_write = -1;
1262 env->tlb_table[0][i].addr_code = -1;
1263 env->tlb_table[1][i].addr_read = -1;
1264 env->tlb_table[1][i].addr_write = -1;
1265 env->tlb_table[1][i].addr_code = -1;
1268 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1270 #if !defined(CONFIG_SOFTMMU)
1271 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1272 #endif
1273 #ifdef USE_KQEMU
1274 if (env->kqemu_enabled) {
1275 kqemu_flush(env, flush_global);
1277 #endif
1278 tlb_flush_count++;
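/* Each CPU has two software TLBs, env->tlb_table[is_user][...], with
   CPU_TLB_SIZE entries indexed by the low bits of the virtual page
   number.  An entry stores separate read/write/code addresses (-1 when
   invalid) plus an 'addend' that turns a guest virtual address into a
   host pointer for RAM pages; see tlb_set_page_exec() below. */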
1281 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1283 if (addr == (tlb_entry->addr_read &
1284 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1285 addr == (tlb_entry->addr_write &
1286 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1287 addr == (tlb_entry->addr_code &
1288 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1289 tlb_entry->addr_read = -1;
1290 tlb_entry->addr_write = -1;
1291 tlb_entry->addr_code = -1;
1295 void tlb_flush_page(CPUState *env, target_ulong addr)
1297 int i;
1298 TranslationBlock *tb;
1300 #if defined(DEBUG_TLB)
1301 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1302 #endif
1303 /* must reset current TB so that interrupts cannot modify the
1304 links while we are modifying them */
1305 env->current_tb = NULL;
1307 addr &= TARGET_PAGE_MASK;
1308 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1309 tlb_flush_entry(&env->tlb_table[0][i], addr);
1310 tlb_flush_entry(&env->tlb_table[1][i], addr);
1312 /* Discard jump cache entries for any tb which might potentially
1313 overlap the flushed page. */
1314 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1315 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1317 i = tb_jmp_cache_hash_page(addr);
1318 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1320 #if !defined(CONFIG_SOFTMMU)
1321 if (addr < MMAP_AREA_END)
1322 munmap((void *)addr, TARGET_PAGE_SIZE);
1323 #endif
1324 #ifdef USE_KQEMU
1325 if (env->kqemu_enabled) {
1326 kqemu_flush_page(env, addr);
1328 #endif
1331 /* update the TLBs so that writes to code in the virtual page 'addr'
1332 can be detected */
1333 static void tlb_protect_code(ram_addr_t ram_addr)
1335 cpu_physical_memory_reset_dirty(ram_addr,
1336 ram_addr + TARGET_PAGE_SIZE,
1337 CODE_DIRTY_FLAG);
1340 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1341 tested for self modifying code */
1342 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1343 target_ulong vaddr)
1345 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1348 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1349 unsigned long start, unsigned long length)
1351 unsigned long addr;
1352 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1353 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1354 if ((addr - start) < length) {
1355 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1360 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1361 int dirty_flags)
1363 CPUState *env;
1364 unsigned long length, start1;
1365 int i, mask, len;
1366 uint8_t *p;
1368 start &= TARGET_PAGE_MASK;
1369 end = TARGET_PAGE_ALIGN(end);
1371 length = end - start;
1372 if (length == 0)
1373 return;
1374 len = length >> TARGET_PAGE_BITS;
1375 #ifdef USE_KQEMU
1376 /* XXX: should not depend on cpu context */
1377 env = first_cpu;
1378 if (env->kqemu_enabled) {
1379 ram_addr_t addr;
1380 addr = start;
1381 for(i = 0; i < len; i++) {
1382 kqemu_set_notdirty(env, addr);
1383 addr += TARGET_PAGE_SIZE;
1386 #endif
1387 mask = ~dirty_flags;
1388 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1389 for(i = 0; i < len; i++)
1390 p[i] &= mask;
1392 /* we modify the TLB cache so that the dirty bit will be set again
1393 when accessing the range */
1394 start1 = start + (unsigned long)phys_ram_base;
1395 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1396 for(i = 0; i < CPU_TLB_SIZE; i++)
1397 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1398 for(i = 0; i < CPU_TLB_SIZE; i++)
1399 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1402 #if !defined(CONFIG_SOFTMMU)
1403 /* XXX: this is expensive */
1405 VirtPageDesc *p;
1406 int j;
1407 target_ulong addr;
1409 for(i = 0; i < L1_SIZE; i++) {
1410 p = l1_virt_map[i];
1411 if (p) {
1412 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1413 for(j = 0; j < L2_SIZE; j++) {
1414 if (p->valid_tag == virt_valid_tag &&
1415 p->phys_addr >= start && p->phys_addr < end &&
1416 (p->prot & PROT_WRITE)) {
1417 if (addr < MMAP_AREA_END) {
1418 mprotect((void *)addr, TARGET_PAGE_SIZE,
1419 p->prot & ~PROT_WRITE);
1422 addr += TARGET_PAGE_SIZE;
1423 p++;
1428 #endif
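/* Dirty tracking: phys_ram_dirty keeps one byte of flags per RAM page.
   cpu_physical_memory_reset_dirty() clears the requested flags and
   re-arms the TLB entries (IO_MEM_NOTDIRTY) so that the next guest
   write to the range goes through the slow notdirty_mem_write*
   handlers, which invalidate any translated code and set the flags
   again. */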
1431 int cpu_physical_memory_set_dirty_tracking(int enable)
1433 int r=0;
1435 #ifdef USE_KVM
1436 r = kvm_physical_memory_set_dirty_tracking(enable);
1437 #endif
1438 in_migration = enable;
1439 return r;
1442 int cpu_physical_memory_get_dirty_tracking(void)
1444 return in_migration;
1447 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1449 ram_addr_t ram_addr;
1451 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1452 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1453 tlb_entry->addend - (unsigned long)phys_ram_base;
1454 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1455 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1460 /* update the TLB according to the current state of the dirty bits */
1461 void cpu_tlb_update_dirty(CPUState *env)
1463 int i;
1464 for(i = 0; i < CPU_TLB_SIZE; i++)
1465 tlb_update_dirty(&env->tlb_table[0][i]);
1466 for(i = 0; i < CPU_TLB_SIZE; i++)
1467 tlb_update_dirty(&env->tlb_table[1][i]);
1470 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1471 unsigned long start)
1473 unsigned long addr;
1474 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1475 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1476 if (addr == start) {
1477 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1482 /* update the TLB corresponding to virtual page vaddr and phys addr
1483 addr so that it is no longer dirty */
1484 static inline void tlb_set_dirty(CPUState *env,
1485 unsigned long addr, target_ulong vaddr)
1487 int i;
1489 addr &= TARGET_PAGE_MASK;
1490 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1491 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1492 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1495 /* add a new TLB entry. At most one entry for a given virtual address
1496 is permitted. Return 0 if OK or 2 if the page could not be mapped
1497    (can only happen in non-SOFTMMU mode for I/O pages or pages
1498 conflicting with the host address space). */
1499 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1500 target_phys_addr_t paddr, int prot,
1501 int is_user, int is_softmmu)
1503 PhysPageDesc *p;
1504 unsigned long pd;
1505 unsigned int index;
1506 target_ulong address;
1507 target_phys_addr_t addend;
1508 int ret;
1509 CPUTLBEntry *te;
1511 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1512 if (!p) {
1513 pd = IO_MEM_UNASSIGNED;
1514 } else {
1515 pd = p->phys_offset;
1517 #if defined(DEBUG_TLB)
1518 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1519 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1520 #endif
1522 ret = 0;
1523 #if !defined(CONFIG_SOFTMMU)
1524 if (is_softmmu)
1525 #endif
1527 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1528 /* IO memory case */
1529 address = vaddr | pd;
1530 addend = paddr;
1531 } else {
1532 /* standard memory */
1533 address = vaddr;
1534 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1537 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1538 addend -= vaddr;
1539 te = &env->tlb_table[is_user][index];
1540 te->addend = addend;
1541 if (prot & PAGE_READ) {
1542 te->addr_read = address;
1543 } else {
1544 te->addr_read = -1;
1546 if (prot & PAGE_EXEC) {
1547 te->addr_code = address;
1548 } else {
1549 te->addr_code = -1;
1551 if (prot & PAGE_WRITE) {
1552 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1553 (pd & IO_MEM_ROMD)) {
1554 /* write access calls the I/O callback */
1555 te->addr_write = vaddr |
1556 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1557 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1558 !cpu_physical_memory_is_dirty(pd)) {
1559 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1560 } else {
1561 te->addr_write = address;
1563 } else {
1564 te->addr_write = -1;
1567 #if !defined(CONFIG_SOFTMMU)
1568 else {
1569 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1570 /* IO access: no mapping is done as it will be handled by the
1571 soft MMU */
1572 if (!(env->hflags & HF_SOFTMMU_MASK))
1573 ret = 2;
1574 } else {
1575 void *map_addr;
1577 if (vaddr >= MMAP_AREA_END) {
1578 ret = 2;
1579 } else {
1580 if (prot & PROT_WRITE) {
1581 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1582 #if defined(TARGET_HAS_SMC) || 1
1583 first_tb ||
1584 #endif
1585 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1586 !cpu_physical_memory_is_dirty(pd))) {
1587 /* ROM: we do as if code was inside */
1588 /* if code is present, we only map as read only and save the
1589 original mapping */
1590 VirtPageDesc *vp;
1592 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1593 vp->phys_addr = pd;
1594 vp->prot = prot;
1595 vp->valid_tag = virt_valid_tag;
1596 prot &= ~PAGE_WRITE;
1599 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1600 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1601 if (map_addr == MAP_FAILED) {
1602                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1603 paddr, vaddr);
1608 #endif
1609 return ret;
1612 /* called from signal handler: invalidate the code and unprotect the
1613    page. Return TRUE if the fault was successfully handled. */
1614 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1616 #if !defined(CONFIG_SOFTMMU)
1617 VirtPageDesc *vp;
1619 #if defined(DEBUG_TLB)
1620 printf("page_unprotect: addr=0x%08x\n", addr);
1621 #endif
1622 addr &= TARGET_PAGE_MASK;
1624 /* if it is not mapped, no need to worry here */
1625 if (addr >= MMAP_AREA_END)
1626 return 0;
1627 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1628 if (!vp)
1629 return 0;
1630 /* NOTE: in this case, validate_tag is _not_ tested as it
1631 validates only the code TLB */
1632 if (vp->valid_tag != virt_valid_tag)
1633 return 0;
1634 if (!(vp->prot & PAGE_WRITE))
1635 return 0;
1636 #if defined(DEBUG_TLB)
1637 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1638 addr, vp->phys_addr, vp->prot);
1639 #endif
1640 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1641 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1642 (unsigned long)addr, vp->prot);
1643 /* set the dirty bit */
1644 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1645 /* flush the code inside */
1646 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1647 return 1;
1648 #else
1649 return 0;
1650 #endif
1653 #else
1655 void tlb_flush(CPUState *env, int flush_global)
1659 void tlb_flush_page(CPUState *env, target_ulong addr)
1663 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1664 target_phys_addr_t paddr, int prot,
1665 int is_user, int is_softmmu)
1667 return 0;
1670 /* dump memory mappings */
1671 void page_dump(FILE *f)
1673 unsigned long start, end;
1674 int i, j, prot, prot1;
1675 PageDesc *p;
1677 fprintf(f, "%-8s %-8s %-8s %s\n",
1678 "start", "end", "size", "prot");
1679 start = -1;
1680 end = -1;
1681 prot = 0;
1682 for(i = 0; i <= L1_SIZE; i++) {
1683 if (i < L1_SIZE)
1684 p = l1_map[i];
1685 else
1686 p = NULL;
1687 for(j = 0;j < L2_SIZE; j++) {
1688 if (!p)
1689 prot1 = 0;
1690 else
1691 prot1 = p[j].flags;
1692 if (prot1 != prot) {
1693 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1694 if (start != -1) {
1695 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1696 start, end, end - start,
1697 prot & PAGE_READ ? 'r' : '-',
1698 prot & PAGE_WRITE ? 'w' : '-',
1699 prot & PAGE_EXEC ? 'x' : '-');
1701 if (prot1 != 0)
1702 start = end;
1703 else
1704 start = -1;
1705 prot = prot1;
1707 if (!p)
1708 break;
1713 int page_get_flags(target_ulong address)
1715 PageDesc *p;
1717 p = page_find(address >> TARGET_PAGE_BITS);
1718 if (!p)
1719 return 0;
1720 return p->flags;
1723 /* modify the flags of a page and invalidate the code if
1724    necessary. The flag PAGE_WRITE_ORG is positioned automatically
1725 depending on PAGE_WRITE */
1726 void page_set_flags(target_ulong start, target_ulong end, int flags)
1728 PageDesc *p;
1729 target_ulong addr;
1731 start = start & TARGET_PAGE_MASK;
1732 end = TARGET_PAGE_ALIGN(end);
1733 if (flags & PAGE_WRITE)
1734 flags |= PAGE_WRITE_ORG;
1735 spin_lock(&tb_lock);
1736 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1737 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1738 /* if the write protection is set, then we invalidate the code
1739 inside */
1740 if (!(p->flags & PAGE_WRITE) &&
1741 (flags & PAGE_WRITE) &&
1742 p->first_tb) {
1743 tb_invalidate_phys_page(addr, 0, NULL);
1745 p->flags = flags;
1747 spin_unlock(&tb_lock);
1750 /* called from signal handler: invalidate the code and unprotect the
1751    page. Return TRUE if the fault was successfully handled. */
1752 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1754 unsigned int page_index, prot, pindex;
1755 PageDesc *p, *p1;
1756 target_ulong host_start, host_end, addr;
1758 host_start = address & qemu_host_page_mask;
1759 page_index = host_start >> TARGET_PAGE_BITS;
1760 p1 = page_find(page_index);
1761 if (!p1)
1762 return 0;
1763 host_end = host_start + qemu_host_page_size;
1764 p = p1;
1765 prot = 0;
1766 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1767 prot |= p->flags;
1768 p++;
1770 /* if the page was really writable, then we change its
1771 protection back to writable */
1772 if (prot & PAGE_WRITE_ORG) {
1773 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1774 if (!(p1[pindex].flags & PAGE_WRITE)) {
1775 mprotect((void *)g2h(host_start), qemu_host_page_size,
1776 (prot & PAGE_BITS) | PAGE_WRITE);
1777 p1[pindex].flags |= PAGE_WRITE;
1778 /* and since the content will be modified, we must invalidate
1779 the corresponding translated code. */
1780 tb_invalidate_phys_page(address, pc, puc);
1781 #ifdef DEBUG_TB_CHECK
1782 tb_invalidate_check(address);
1783 #endif
1784 return 1;
1787 return 0;
1790 /* call this function when system calls directly modify a memory area */
1791 /* ??? This should be redundant now that we have lock_user. */
1792 void page_unprotect_range(target_ulong data, target_ulong data_size)
1794 target_ulong start, end, addr;
1796 start = data;
1797 end = start + data_size;
1798 start &= TARGET_PAGE_MASK;
1799 end = TARGET_PAGE_ALIGN(end);
1800 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1801 page_unprotect(addr, 0, NULL);
1805 static inline void tlb_set_dirty(CPUState *env,
1806 unsigned long addr, target_ulong vaddr)
1809 #endif /* defined(CONFIG_USER_ONLY) */
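/* In the physical page table, phys_offset encodes both kinds of pages:
   for RAM the page-aligned part is the offset into phys_ram_base, while
   the low bits hold an I/O index (IO_MEM_RAM, IO_MEM_ROM,
   IO_MEM_UNASSIGNED or a value returned by cpu_register_io_memory
   below). */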
1811 /* register physical memory. 'size' must be a multiple of the target
1812 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1813 io memory page */
1814 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1815 unsigned long size,
1816 unsigned long phys_offset)
1818 target_phys_addr_t addr, end_addr;
1819 PhysPageDesc *p;
1820 CPUState *env;
1822 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1823 end_addr = start_addr + size;
1824 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1825 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1826 p->phys_offset = phys_offset;
1827 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1828 (phys_offset & IO_MEM_ROMD))
1829 phys_offset += TARGET_PAGE_SIZE;
1832 /* since each CPU stores ram addresses in its TLB cache, we must
1833 reset the modified entries */
1834 /* XXX: slow ! */
1835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1836 tlb_flush(env, 1);
1840 /* XXX: temporary until new memory mapping API */
1841 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1843 PhysPageDesc *p;
1845 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1846 if (!p)
1847 return IO_MEM_UNASSIGNED;
1848 return p->phys_offset;
1851 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1853 #ifdef DEBUG_UNASSIGNED
1854 printf("Unassigned mem read 0x%08x\n", (int)addr);
1855 #endif
1856 return 0;
1859 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1861 #ifdef DEBUG_UNASSIGNED
1862 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1863 #endif
1866 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1867 unassigned_mem_readb,
1868 unassigned_mem_readb,
1869 unassigned_mem_readb,
1872 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1873 unassigned_mem_writeb,
1874 unassigned_mem_writeb,
1875 unassigned_mem_writeb,
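/* The notdirty handlers below are installed (via IO_MEM_NOTDIRTY) for
   RAM pages whose dirty flags have been cleared.  Each write first
   invalidates any TBs overlapping the written bytes, then performs the
   store into host RAM, sets the dirty flags and, once the page has no
   remaining translated code (flags == 0xff), restores the direct RAM
   mapping with tlb_set_dirty(). */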
1878 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1880 unsigned long ram_addr;
1881 int dirty_flags;
1882 ram_addr = addr - (unsigned long)phys_ram_base;
1883 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1884 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1885 #if !defined(CONFIG_USER_ONLY)
1886 tb_invalidate_phys_page_fast(ram_addr, 1);
1887 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1888 #endif
1890 stb_p((uint8_t *)(long)addr, val);
1891 #ifdef USE_KQEMU
1892 if (cpu_single_env->kqemu_enabled &&
1893 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1894 kqemu_modify_page(cpu_single_env, ram_addr);
1895 #endif
1896 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1897 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1898 /* we remove the notdirty callback only if the code has been
1899 flushed */
1900 if (dirty_flags == 0xff)
1901 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1904 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1906 unsigned long ram_addr;
1907 int dirty_flags;
1908 ram_addr = addr - (unsigned long)phys_ram_base;
1909 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1910 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1911 #if !defined(CONFIG_USER_ONLY)
1912 tb_invalidate_phys_page_fast(ram_addr, 2);
1913 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1914 #endif
1916 stw_p((uint8_t *)(long)addr, val);
1917 #ifdef USE_KQEMU
1918 if (cpu_single_env->kqemu_enabled &&
1919 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1920 kqemu_modify_page(cpu_single_env, ram_addr);
1921 #endif
1922 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1923 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1924 /* we remove the notdirty callback only if the code has been
1925 flushed */
1926 if (dirty_flags == 0xff)
1927 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1930 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1932 unsigned long ram_addr;
1933 int dirty_flags;
1934 ram_addr = addr - (unsigned long)phys_ram_base;
1935 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1936 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1937 #if !defined(CONFIG_USER_ONLY)
1938 tb_invalidate_phys_page_fast(ram_addr, 4);
1939 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1940 #endif
1942 stl_p((uint8_t *)(long)addr, val);
1943 #ifdef USE_KQEMU
1944 if (cpu_single_env->kqemu_enabled &&
1945 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1946 kqemu_modify_page(cpu_single_env, ram_addr);
1947 #endif
1948 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1949 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1950 /* we remove the notdirty callback only if the code has been
1951 flushed */
1952 if (dirty_flags == 0xff)
1953 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1956 static CPUReadMemoryFunc *error_mem_read[3] = {
1957 NULL, /* never used */
1958 NULL, /* never used */
1959 NULL, /* never used */
1962 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1963 notdirty_mem_writeb,
1964 notdirty_mem_writew,
1965 notdirty_mem_writel,
1968 static void io_mem_init(void)
1970 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1971 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1972 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1973 io_mem_nb = 5;
1975 /* alloc dirty bits array */
1976 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1977 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1980 /* mem_read and mem_write are arrays of functions containing the
1981 function to access byte (index 0), word (index 1) and dword (index
1982    2). All functions must be supplied. If io_index is non-zero, the
1983 corresponding io zone is modified. If it is zero, a new io zone is
1984 allocated. The return value can be used with
1985    cpu_register_physical_memory(). (-1) is returned on error. */
1986 int cpu_register_io_memory(int io_index,
1987 CPUReadMemoryFunc **mem_read,
1988 CPUWriteMemoryFunc **mem_write,
1989 void *opaque)
1991 int i;
1993 if (io_index <= 0) {
1994 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1995 return -1;
1996 io_index = io_mem_nb++;
1997 } else {
1998 if (io_index >= IO_MEM_NB_ENTRIES)
1999 return -1;
2002 for(i = 0;i < 3; i++) {
2003 io_mem_read[io_index][i] = mem_read[i];
2004 io_mem_write[io_index][i] = mem_write[i];
2006 io_mem_opaque[io_index] = opaque;
2007 return io_index << IO_MEM_SHIFT;
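/* Example usage (sketch only; the device name, callbacks and addresses
   are made up for illustration):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
       cpu_register_physical_memory(0xe0000000, 0x1000, io);

   Passing 0 as io_index allocates a new slot; the returned value is the
   phys_offset token understood by cpu_register_physical_memory(). */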
2010 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2012 return io_mem_write[io_index >> IO_MEM_SHIFT];
2015 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2017 return io_mem_read[io_index >> IO_MEM_SHIFT];
2020 /* physical memory access (slow version, mainly for debug) */
2021 #if defined(CONFIG_USER_ONLY)
2022 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2023 int len, int is_write)
2025 int l, flags;
2026 target_ulong page;
2027 void * p;
2029 while (len > 0) {
2030 page = addr & TARGET_PAGE_MASK;
2031 l = (page + TARGET_PAGE_SIZE) - addr;
2032 if (l > len)
2033 l = len;
2034 flags = page_get_flags(page);
2035 if (!(flags & PAGE_VALID))
2036 return;
2037 if (is_write) {
2038 if (!(flags & PAGE_WRITE))
2039 return;
2040 p = lock_user(addr, len, 0);
2041 memcpy(p, buf, len);
2042 unlock_user(p, addr, len);
2043 } else {
2044 if (!(flags & PAGE_READ))
2045 return;
2046 p = lock_user(addr, len, 1);
2047 memcpy(buf, p, len);
2048 unlock_user(p, addr, 0);
2050 len -= l;
2051 buf += l;
2052 addr += l;
2056 #else
2057 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2058 int len, int is_write)
2060 int l, io_index;
2061 uint8_t *ptr;
2062 uint32_t val;
2063 target_phys_addr_t page;
2064 unsigned long pd;
2065 PhysPageDesc *p;
2067 while (len > 0) {
2068 page = addr & TARGET_PAGE_MASK;
2069 l = (page + TARGET_PAGE_SIZE) - addr;
2070 if (l > len)
2071 l = len;
2072 p = phys_page_find(page >> TARGET_PAGE_BITS);
2073 if (!p) {
2074 pd = IO_MEM_UNASSIGNED;
2075 } else {
2076 pd = p->phys_offset;
2077 }
2079 if (is_write) {
2080 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2081 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2082 /* XXX: could force cpu_single_env to NULL to avoid
2083 potential bugs */
2084 if (l >= 4 && ((addr & 3) == 0)) {
2085 /* 32 bit write access */
2086 val = ldl_p(buf);
2087 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2088 l = 4;
2089 } else if (l >= 2 && ((addr & 1) == 0)) {
2090 /* 16 bit write access */
2091 val = lduw_p(buf);
2092 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2093 l = 2;
2094 } else {
2095 /* 8 bit write access */
2096 val = ldub_p(buf);
2097 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2098 l = 1;
2099 }
2100 } else {
2101 unsigned long addr1;
2102 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2103 /* RAM case */
2104 ptr = phys_ram_base + addr1;
2105 memcpy(ptr, buf, l);
2106 if (!cpu_physical_memory_is_dirty(addr1)) {
2107 /* invalidate code */
2108 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2109 /* set dirty bit */
2110 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2111 (0xff & ~CODE_DIRTY_FLAG);
2112 }
2113 }
2114 } else {
2115 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2116 !(pd & IO_MEM_ROMD)) {
2117 /* I/O case */
2118 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2119 if (l >= 4 && ((addr & 3) == 0)) {
2120 /* 32 bit read access */
2121 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2122 stl_p(buf, val);
2123 l = 4;
2124 } else if (l >= 2 && ((addr & 1) == 0)) {
2125 /* 16 bit read access */
2126 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2127 stw_p(buf, val);
2128 l = 2;
2129 } else {
2130 /* 8 bit read access */
2131 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2132 stb_p(buf, val);
2133 l = 1;
2134 }
2135 } else {
2136 /* RAM case */
2137 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2138 (addr & ~TARGET_PAGE_MASK);
2139 memcpy(buf, ptr, l);
2140 }
2141 }
2142 len -= l;
2143 buf += l;
2144 addr += l;
2145 }
2146 }
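/* Illustrative sketch only (not part of this file): device emulation
   normally performs guest "DMA" through the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers around cpu_physical_memory_rw(),
   so that MMIO targets, code invalidation and dirty bits are all handled
   in one place.  The descriptor layout and the mydev_* name below are
   hypothetical. */
#if 0
static void mydev_dma_copy(target_phys_addr_t desc_addr)
{
    uint8_t bounce[4096];
    uint32_t src  = ldl_phys(desc_addr);        /* hypothetical descriptor: */
    uint32_t dst  = ldl_phys(desc_addr + 4);    /* source, destination,     */
    uint32_t size = ldl_phys(desc_addr + 8);    /* length in bytes          */

    if (size > sizeof(bounce))
        size = sizeof(bounce);
    cpu_physical_memory_read(src, bounce, size);
    cpu_physical_memory_write(dst, bounce, size);
}
#endif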
2148 /* used for ROM loading: can write to both RAM and ROM */
2149 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2150 const uint8_t *buf, int len)
2151 {
2152 int l;
2153 uint8_t *ptr;
2154 target_phys_addr_t page;
2155 unsigned long pd;
2156 PhysPageDesc *p;
2158 while (len > 0) {
2159 page = addr & TARGET_PAGE_MASK;
2160 l = (page + TARGET_PAGE_SIZE) - addr;
2161 if (l > len)
2162 l = len;
2163 p = phys_page_find(page >> TARGET_PAGE_BITS);
2164 if (!p) {
2165 pd = IO_MEM_UNASSIGNED;
2166 } else {
2167 pd = p->phys_offset;
2168 }
2170 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2171 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2172 !(pd & IO_MEM_ROMD)) {
2173 /* do nothing */
2174 } else {
2175 unsigned long addr1;
2176 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2177 /* ROM/RAM case */
2178 ptr = phys_ram_base + addr1;
2179 memcpy(ptr, buf, l);
2180 }
2181 len -= l;
2182 buf += l;
2183 addr += l;
2184 }
2185 }
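/* Illustrative sketch only (not part of this file): board setup code would
   call this helper when copying a firmware image into a region registered
   as ROM, where a plain cpu_physical_memory_write() would be discarded by
   the ROM check in cpu_physical_memory_rw().  bios_base, bios_buf and
   bios_size are hypothetical names. */
#if 0
    cpu_physical_memory_write_rom(bios_base, bios_buf, bios_size);
#endif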
2188 /* warning: addr must be aligned */
2189 uint32_t ldl_phys(target_phys_addr_t addr)
2190 {
2191 int io_index;
2192 uint8_t *ptr;
2193 uint32_t val;
2194 unsigned long pd;
2195 PhysPageDesc *p;
2197 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2198 if (!p) {
2199 pd = IO_MEM_UNASSIGNED;
2200 } else {
2201 pd = p->phys_offset;
2202 }
2204 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2205 !(pd & IO_MEM_ROMD)) {
2206 /* I/O case */
2207 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2208 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2209 } else {
2210 /* RAM case */
2211 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2212 (addr & ~TARGET_PAGE_MASK);
2213 val = ldl_p(ptr);
2214 }
2215 return val;
2216 }
2218 /* warning: addr must be aligned */
2219 uint64_t ldq_phys(target_phys_addr_t addr)
2220 {
2221 int io_index;
2222 uint8_t *ptr;
2223 uint64_t val;
2224 unsigned long pd;
2225 PhysPageDesc *p;
2227 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2228 if (!p) {
2229 pd = IO_MEM_UNASSIGNED;
2230 } else {
2231 pd = p->phys_offset;
2232 }
2234 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2235 !(pd & IO_MEM_ROMD)) {
2236 /* I/O case */
2237 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2238 #ifdef TARGET_WORDS_BIGENDIAN
2239 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2240 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2241 #else
2242 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2243 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2244 #endif
2245 } else {
2246 /* RAM case */
2247 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2248 (addr & ~TARGET_PAGE_MASK);
2249 val = ldq_p(ptr);
2250 }
2251 return val;
2252 }
2254 /* XXX: optimize */
2255 uint32_t ldub_phys(target_phys_addr_t addr)
2256 {
2257 uint8_t val;
2258 cpu_physical_memory_read(addr, &val, 1);
2259 return val;
2260 }
2262 /* XXX: optimize */
2263 uint32_t lduw_phys(target_phys_addr_t addr)
2264 {
2265 uint16_t val;
2266 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2267 return tswap16(val);
2268 }
2270 #ifdef __GNUC__
2271 #define likely(x) __builtin_expect(!!(x), 1)
2272 #define unlikely(x) __builtin_expect(!!(x), 0)
2273 #else
2274 #define likely(x) x
2275 #define unlikely(x) x
2276 #endif
2278 /* warning: addr must be aligned. The ram page is not marked as dirty
2279 and the code inside is not invalidated. It is useful if the dirty
2280 bits are used to track modified PTEs */
2281 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2282 {
2283 int io_index;
2284 uint8_t *ptr;
2285 unsigned long pd;
2286 PhysPageDesc *p;
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2289 if (!p) {
2290 pd = IO_MEM_UNASSIGNED;
2291 } else {
2292 pd = p->phys_offset;
2293 }
2295 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2298 } else {
2299 unsigned long addr1;
2300 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2302 ptr = phys_ram_base + addr1;
2303 stl_p(ptr, val);
2305 if (unlikely(in_migration)) {
2306 if (!cpu_physical_memory_is_dirty(addr1)) {
2307 /* invalidate code */
2308 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2309 /* set dirty bit */
2310 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2311 (0xff & ~CODE_DIRTY_FLAG);
2312 }
2313 }
2314 }
2315 }
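/* Illustrative sketch only (not part of this file): a target MMU helper can
   update a guest page table entry with stl_phys_notdirty() so that the
   emulator's own accessed/dirty-bit update does not itself mark the RAM
   page dirty and defeat the PTE-modification tracking those dirty bits
   implement.  pte_addr and PTE_ACCESSED_BIT are hypothetical names. */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & PTE_ACCESSED_BIT))
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_BIT);
#endif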
2317 /* warning: addr must be aligned */
2318 void stl_phys(target_phys_addr_t addr, uint32_t val)
2319 {
2320 int io_index;
2321 uint8_t *ptr;
2322 unsigned long pd;
2323 PhysPageDesc *p;
2325 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2326 if (!p) {
2327 pd = IO_MEM_UNASSIGNED;
2328 } else {
2329 pd = p->phys_offset;
2330 }
2332 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2333 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2334 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2335 } else {
2336 unsigned long addr1;
2337 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2338 /* RAM case */
2339 ptr = phys_ram_base + addr1;
2340 stl_p(ptr, val);
2341 if (!cpu_physical_memory_is_dirty(addr1)) {
2342 /* invalidate code */
2343 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2344 /* set dirty bit */
2345 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2346 (0xff & ~CODE_DIRTY_FLAG);
2347 }
2348 }
2349 }
2351 /* XXX: optimize */
2352 void stb_phys(target_phys_addr_t addr, uint32_t val)
2353 {
2354 uint8_t v = val;
2355 cpu_physical_memory_write(addr, &v, 1);
2356 }
2358 /* XXX: optimize */
2359 void stw_phys(target_phys_addr_t addr, uint32_t val)
2360 {
2361 uint16_t v = tswap16(val);
2362 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2363 }
2365 /* XXX: optimize */
2366 void stq_phys(target_phys_addr_t addr, uint64_t val)
2367 {
2368 val = tswap64(val);
2369 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2370 }
2372 #endif
2374 /* virtual memory access for debug */
2375 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2376 uint8_t *buf, int len, int is_write)
2377 {
2378 int l;
2379 target_ulong page, phys_addr;
2381 while (len > 0) {
2382 page = addr & TARGET_PAGE_MASK;
2383 phys_addr = cpu_get_phys_page_debug(env, page);
2384 /* if no physical page mapped, return an error */
2385 if (phys_addr == -1)
2386 return -1;
2387 l = (page + TARGET_PAGE_SIZE) - addr;
2388 if (l > len)
2389 l = len;
2390 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2391 buf, l, is_write);
2392 len -= l;
2393 buf += l;
2394 addr += l;
2395 }
2396 return 0;
2397 }
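/* Illustrative sketch only (not part of this file): debugger-style callers
   (e.g. a gdb stub or monitor command) read guest virtual memory through
   cpu_memory_rw_debug(), which translates page by page and then falls back
   to cpu_physical_memory_rw().  env, vaddr and the error handling below are
   hypothetical. */
#if 0
    uint8_t buf[16];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        /* no valid mapping at vaddr: report a memory read error */
    }
#endif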
2399 void dump_exec_info(FILE *f,
2400 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2401 {
2402 int i, target_code_size, max_target_code_size;
2403 int direct_jmp_count, direct_jmp2_count, cross_page;
2404 TranslationBlock *tb;
2406 target_code_size = 0;
2407 max_target_code_size = 0;
2408 cross_page = 0;
2409 direct_jmp_count = 0;
2410 direct_jmp2_count = 0;
2411 for(i = 0; i < nb_tbs; i++) {
2412 tb = &tbs[i];
2413 target_code_size += tb->size;
2414 if (tb->size > max_target_code_size)
2415 max_target_code_size = tb->size;
2416 if (tb->page_addr[1] != -1)
2417 cross_page++;
2418 if (tb->tb_next_offset[0] != 0xffff) {
2419 direct_jmp_count++;
2420 if (tb->tb_next_offset[1] != 0xffff) {
2421 direct_jmp2_count++;
2422 }
2423 }
2424 }
2425 /* XXX: avoid using doubles ? */
2426 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2427 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2428 nb_tbs ? target_code_size / nb_tbs : 0,
2429 max_target_code_size);
2430 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2431 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2432 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2433 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2434 cross_page,
2435 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2436 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2437 direct_jmp_count,
2438 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2439 direct_jmp2_count,
2440 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2441 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2442 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2443 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2446 #if !defined(CONFIG_USER_ONLY)
2448 #define MMUSUFFIX _cmmu
2449 #define GETPC() NULL
2450 #define env cpu_single_env
2451 #define SOFTMMU_CODE_ACCESS
2453 #define SHIFT 0
2454 #include "softmmu_template.h"
2456 #define SHIFT 1
2457 #include "softmmu_template.h"
2459 #define SHIFT 2
2460 #include "softmmu_template.h"
2462 #define SHIFT 3
2463 #include "softmmu_template.h"
2465 #undef env
2467 #endif