1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #ifdef USE_KVM
38 #include "qemu-kvm.h"
39 #endif
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_PPC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 42
70 #else
71 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
72 #define TARGET_PHYS_ADDR_SPACE_BITS 32
73 #endif
75 #ifdef USE_KVM
76 extern int kvm_allowed;
77 #endif
79 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
80 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
81 int nb_tbs;
82 /* any access to the tbs or the page table must use this lock */
83 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
85 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
86 uint8_t *code_gen_ptr;
88 int phys_ram_size;
89 int phys_ram_fd;
90 uint8_t *phys_ram_base;
91 uint8_t *phys_ram_dirty;
92 uint8_t *bios_mem;
93 static int in_migration;
95 CPUState *first_cpu;
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 CPUState *cpu_single_env;
100 typedef struct PageDesc {
101 /* list of TBs intersecting this ram page */
102 TranslationBlock *first_tb;
103     /* in order to optimize self-modifying code handling, we count the code
104        write accesses to a given page and switch to a bitmap past a threshold */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107 #if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109 #endif
110 } PageDesc;
112 typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
114 uint32_t phys_offset;
115 } PhysPageDesc;
117 #define L2_BITS 10
118 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
120 #define L1_SIZE (1 << L1_BITS)
121 #define L2_SIZE (1 << L2_BITS)
123 static void io_mem_init(void);
125 unsigned long qemu_real_host_page_size;
126 unsigned long qemu_host_page_bits;
127 unsigned long qemu_host_page_size;
128 unsigned long qemu_host_page_mask;
130 /* XXX: for system emulation, it could just be an array */
131 static PageDesc *l1_map[L1_SIZE];
132 PhysPageDesc **l1_phys_map;
134 /* io memory support */
135 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
136 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
137 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
138 static int io_mem_nb;
140 /* log support */
141 char *logfilename = "/tmp/qemu.log";
142 FILE *logfile;
143 int loglevel;
145 /* statistics */
146 static int tlb_flush_count;
147 static int tb_flush_count;
148 static int tb_phys_invalidate_count;
150 static void page_init(void)
152 /* NOTE: we can always assume that qemu_host_page_size >=
153 TARGET_PAGE_SIZE */
154 #ifdef _WIN32
156 SYSTEM_INFO system_info;
157 DWORD old_protect;
159 GetSystemInfo(&system_info);
160 qemu_real_host_page_size = system_info.dwPageSize;
162 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
163 PAGE_EXECUTE_READWRITE, &old_protect);
165 #else
166 qemu_real_host_page_size = getpagesize();
168 unsigned long start, end;
170 start = (unsigned long)code_gen_buffer;
171 start &= ~(qemu_real_host_page_size - 1);
173 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
174 end += qemu_real_host_page_size - 1;
175 end &= ~(qemu_real_host_page_size - 1);
177 mprotect((void *)start, end - start,
178 PROT_READ | PROT_WRITE | PROT_EXEC);
180 #endif
182 if (qemu_host_page_size == 0)
183 qemu_host_page_size = qemu_real_host_page_size;
184 if (qemu_host_page_size < TARGET_PAGE_SIZE)
185 qemu_host_page_size = TARGET_PAGE_SIZE;
186 qemu_host_page_bits = 0;
187 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
188 qemu_host_page_bits++;
189 qemu_host_page_mask = ~(qemu_host_page_size - 1);
190 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
191 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
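/* Worked example (not part of the original file): with a 4 KB host page
   (qemu_real_host_page_size == 0x1000) and, as an assumption here,
   TARGET_PAGE_SIZE == 0x1000, the loop above yields

       qemu_host_page_size == 0x1000
       qemu_host_page_bits == 12
       qemu_host_page_mask == 0xfffff000   (on a 32-bit host)

   On hosts with larger pages (e.g. 64 KB), one host page covers several
   target pages, which is why the user-mode path in tb_alloc_page()
   write-protects a full host page at a time. */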
194 static inline PageDesc *page_find_alloc(unsigned int index)
196 PageDesc **lp, *p;
198 lp = &l1_map[index >> L2_BITS];
199 p = *lp;
200 if (!p) {
201 /* allocate if not found */
202 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
203 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
204 *lp = p;
206 return p + (index & (L2_SIZE - 1));
209 static inline PageDesc *page_find(unsigned int index)
211 PageDesc *p;
213 p = l1_map[index >> L2_BITS];
214 if (!p)
215 return 0;
216 return p + (index & (L2_SIZE - 1));
219 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
221 void **lp, **p;
222 PhysPageDesc *pd;
224 p = (void **)l1_phys_map;
225 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
227 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
228 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
229 #endif
230 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
231 p = *lp;
232 if (!p) {
233 /* allocate if not found */
234 if (!alloc)
235 return NULL;
236 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
237 memset(p, 0, sizeof(void *) * L1_SIZE);
238 *lp = p;
240 #endif
241 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
242 pd = *lp;
243 if (!pd) {
244 int i;
245 /* allocate if not found */
246 if (!alloc)
247 return NULL;
248 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
249 *lp = pd;
250 for (i = 0; i < L2_SIZE; i++)
251 pd[i].phys_offset = IO_MEM_UNASSIGNED;
253 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
256 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
258 return phys_page_find_alloc(index, 0);
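/* Illustrative note (not part of the original file): how phys_page_find()
   decomposes a physical page index.  Assuming TARGET_PAGE_BITS == 12 and a
   32-bit physical address space (so L1_BITS == L2_BITS == 10):

       target_phys_addr_t paddr = 0x12345678;
       unsigned int index = paddr >> TARGET_PAGE_BITS;           // 0x12345
       unsigned int l1    = (index >> L2_BITS) & (L1_SIZE - 1);  // 0x048
       unsigned int l2    = index & (L2_SIZE - 1);               // 0x345
       // l1_phys_map[l1][l2].phys_offset describes that page

   When TARGET_PHYS_ADDR_SPACE_BITS > 32, an extra indirection level is
   used, as handled by the #if block in phys_page_find_alloc() above. */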
261 #if !defined(CONFIG_USER_ONLY)
262 static void tlb_protect_code(ram_addr_t ram_addr);
263 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
264 target_ulong vaddr);
265 #endif
267 void cpu_exec_init(CPUState *env)
269 CPUState **penv;
270 int cpu_index;
272 if (!code_gen_ptr) {
273 code_gen_ptr = code_gen_buffer;
274 page_init();
275 io_mem_init();
277 env->next_cpu = NULL;
278 penv = &first_cpu;
279 cpu_index = 0;
280 while (*penv != NULL) {
281 penv = (CPUState **)&(*penv)->next_cpu;
282 cpu_index++;
284 env->cpu_index = cpu_index;
285 *penv = env;
288 static inline void invalidate_page_bitmap(PageDesc *p)
290 if (p->code_bitmap) {
291 qemu_free(p->code_bitmap);
292 p->code_bitmap = NULL;
294 p->code_write_count = 0;
297 /* set to NULL all the 'first_tb' fields in all PageDescs */
298 static void page_flush_tb(void)
300 int i, j;
301 PageDesc *p;
303 for(i = 0; i < L1_SIZE; i++) {
304 p = l1_map[i];
305 if (p) {
306 for(j = 0; j < L2_SIZE; j++) {
307 p->first_tb = NULL;
308 invalidate_page_bitmap(p);
309 p++;
315 /* flush all the translation blocks */
316 /* XXX: tb_flush is currently not thread safe */
317 void tb_flush(CPUState *env1)
319 CPUState *env;
320 #if defined(DEBUG_FLUSH)
321 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
322 code_gen_ptr - code_gen_buffer,
323 nb_tbs,
324 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
325 #endif
326 nb_tbs = 0;
328 for(env = first_cpu; env != NULL; env = env->next_cpu) {
329 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
332 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
333 page_flush_tb();
335 code_gen_ptr = code_gen_buffer;
336 /* XXX: flush processor icache at this point if cache flush is
337 expensive */
338 tb_flush_count++;
341 #ifdef DEBUG_TB_CHECK
343 static void tb_invalidate_check(unsigned long address)
345 TranslationBlock *tb;
346 int i;
347 address &= TARGET_PAGE_MASK;
348 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
349 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
350 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
351 address >= tb->pc + tb->size)) {
352 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
353 address, (long)tb->pc, tb->size);
359 /* verify that all the pages have correct rights for code */
360 static void tb_page_check(void)
362 TranslationBlock *tb;
363 int i, flags1, flags2;
365 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
366 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
367 flags1 = page_get_flags(tb->pc);
368 flags2 = page_get_flags(tb->pc + tb->size - 1);
369 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
370 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
371 (long)tb->pc, tb->size, flags1, flags2);
377 void tb_jmp_check(TranslationBlock *tb)
379 TranslationBlock *tb1;
380 unsigned int n1;
382 /* suppress any remaining jumps to this TB */
383 tb1 = tb->jmp_first;
384 for(;;) {
385 n1 = (long)tb1 & 3;
386 tb1 = (TranslationBlock *)((long)tb1 & ~3);
387 if (n1 == 2)
388 break;
389 tb1 = tb1->jmp_next[n1];
391 /* check end of list */
392 if (tb1 != tb) {
393 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
397 #endif
399 /* invalidate one TB */
400 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
401 int next_offset)
403 TranslationBlock *tb1;
404 for(;;) {
405 tb1 = *ptb;
406 if (tb1 == tb) {
407 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
408 break;
410 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
414 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
416 TranslationBlock *tb1;
417 unsigned int n1;
419 for(;;) {
420 tb1 = *ptb;
421 n1 = (long)tb1 & 3;
422 tb1 = (TranslationBlock *)((long)tb1 & ~3);
423 if (tb1 == tb) {
424 *ptb = tb1->page_next[n1];
425 break;
427 ptb = &tb1->page_next[n1];
431 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
433 TranslationBlock *tb1, **ptb;
434 unsigned int n1;
436 ptb = &tb->jmp_next[n];
437 tb1 = *ptb;
438 if (tb1) {
439 /* find tb(n) in circular list */
440 for(;;) {
441 tb1 = *ptb;
442 n1 = (long)tb1 & 3;
443 tb1 = (TranslationBlock *)((long)tb1 & ~3);
444 if (n1 == n && tb1 == tb)
445 break;
446 if (n1 == 2) {
447 ptb = &tb1->jmp_first;
448 } else {
449 ptb = &tb1->jmp_next[n1];
452 /* now we can suppress tb(n) from the list */
453 *ptb = tb->jmp_next[n];
455 tb->jmp_next[n] = NULL;
459 /* reset the jump entry 'n' of a TB so that it is not chained to
460 another TB */
461 static inline void tb_reset_jump(TranslationBlock *tb, int n)
463 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
466 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
468 CPUState *env;
469 PageDesc *p;
470 unsigned int h, n1;
471 target_ulong phys_pc;
472 TranslationBlock *tb1, *tb2;
474 /* remove the TB from the hash list */
475 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
476 h = tb_phys_hash_func(phys_pc);
477 tb_remove(&tb_phys_hash[h], tb,
478 offsetof(TranslationBlock, phys_hash_next));
480 /* remove the TB from the page list */
481 if (tb->page_addr[0] != page_addr) {
482 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
483 tb_page_remove(&p->first_tb, tb);
484 invalidate_page_bitmap(p);
486 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
487 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
488 tb_page_remove(&p->first_tb, tb);
489 invalidate_page_bitmap(p);
492 tb_invalidated_flag = 1;
494 /* remove the TB from the hash list */
495 h = tb_jmp_cache_hash_func(tb->pc);
496 for(env = first_cpu; env != NULL; env = env->next_cpu) {
497 if (env->tb_jmp_cache[h] == tb)
498 env->tb_jmp_cache[h] = NULL;
501 /* suppress this TB from the two jump lists */
502 tb_jmp_remove(tb, 0);
503 tb_jmp_remove(tb, 1);
505 /* suppress any remaining jumps to this TB */
506 tb1 = tb->jmp_first;
507 for(;;) {
508 n1 = (long)tb1 & 3;
509 if (n1 == 2)
510 break;
511 tb1 = (TranslationBlock *)((long)tb1 & ~3);
512 tb2 = tb1->jmp_next[n1];
513 tb_reset_jump(tb1, n1);
514 tb1->jmp_next[n1] = NULL;
515 tb1 = tb2;
517 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
519 tb_phys_invalidate_count++;
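/* Illustrative note (not part of the original file): the TB list pointers
   manipulated above store a 2-bit tag in the low bits of the pointer.  In
   first_tb/page_next[] the tag selects which of the TB's two pages the link
   belongs to; in the jmp_first/jmp_next circular list the value 2 marks the
   list head/terminator (hence the "| 2" fail-safe above).  A sketch:

       TranslationBlock *link = (TranslationBlock *)((long)tb | n);   // store
       int n1 = (long)link & 3;                                       // tag
       TranslationBlock *tb1 = (TranslationBlock *)((long)link & ~3); // pointer
*/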
522 static inline void set_bits(uint8_t *tab, int start, int len)
524 int end, mask, end1;
526 end = start + len;
527 tab += start >> 3;
528 mask = 0xff << (start & 7);
529 if ((start & ~7) == (end & ~7)) {
530 if (start < end) {
531 mask &= ~(0xff << (end & 7));
532 *tab |= mask;
534 } else {
535 *tab++ |= mask;
536 start = (start + 8) & ~7;
537 end1 = end & ~7;
538 while (start < end1) {
539 *tab++ = 0xff;
540 start += 8;
542 if (start < end) {
543 mask = ~(0xff << (end & 7));
544 *tab |= mask;
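/* Worked example (not part of the original file): set_bits(tab, 3, 10)
   marks bits 3..12 of the bitmap, i.e.

       tab[0] |= 0xf8;   // bits 3..7 of the first byte
       tab[1] |= 0x1f;   // bits 8..12, stored as bits 0..4 of the next byte

   build_page_bitmap() below uses this to record, per page, which byte
   offsets are covered by translated code. */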
549 static void build_page_bitmap(PageDesc *p)
551 int n, tb_start, tb_end;
552 TranslationBlock *tb;
554 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
555 if (!p->code_bitmap)
556 return;
557 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
559 tb = p->first_tb;
560 while (tb != NULL) {
561 n = (long)tb & 3;
562 tb = (TranslationBlock *)((long)tb & ~3);
563 /* NOTE: this is subtle as a TB may span two physical pages */
564 if (n == 0) {
565 /* NOTE: tb_end may be after the end of the page, but
566 it is not a problem */
567 tb_start = tb->pc & ~TARGET_PAGE_MASK;
568 tb_end = tb_start + tb->size;
569 if (tb_end > TARGET_PAGE_SIZE)
570 tb_end = TARGET_PAGE_SIZE;
571 } else {
572 tb_start = 0;
573 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
575 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
576 tb = tb->page_next[n];
580 #ifdef TARGET_HAS_PRECISE_SMC
582 static void tb_gen_code(CPUState *env,
583 target_ulong pc, target_ulong cs_base, int flags,
584 int cflags)
586 TranslationBlock *tb;
587 uint8_t *tc_ptr;
588 target_ulong phys_pc, phys_page2, virt_page2;
589 int code_gen_size;
591 phys_pc = get_phys_addr_code(env, pc);
592 tb = tb_alloc(pc);
593 if (!tb) {
594 /* flush must be done */
595 tb_flush(env);
596 /* cannot fail at this point */
597 tb = tb_alloc(pc);
599 tc_ptr = code_gen_ptr;
600 tb->tc_ptr = tc_ptr;
601 tb->cs_base = cs_base;
602 tb->flags = flags;
603 tb->cflags = cflags;
604 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
605 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
607 /* check next page if needed */
608 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
609 phys_page2 = -1;
610 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
611 phys_page2 = get_phys_addr_code(env, virt_page2);
613 tb_link_phys(tb, phys_pc, phys_page2);
615 #endif
617 /* invalidate all TBs which intersect with the target physical page
618    starting in range [start, end). NOTE: start and end must refer to
619 the same physical page. 'is_cpu_write_access' should be true if called
620 from a real cpu write access: the virtual CPU will exit the current
621 TB if code is modified inside this TB. */
622 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
623 int is_cpu_write_access)
625 int n, current_tb_modified, current_tb_not_found, current_flags;
626 CPUState *env = cpu_single_env;
627 PageDesc *p;
628 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
629 target_ulong tb_start, tb_end;
630 target_ulong current_pc, current_cs_base;
632 p = page_find(start >> TARGET_PAGE_BITS);
633 if (!p)
634 return;
635 if (!p->code_bitmap &&
636 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
637 is_cpu_write_access) {
638 /* build code bitmap */
639 build_page_bitmap(p);
642 /* we remove all the TBs in the range [start, end) */
643 /* XXX: see if in some cases it could be faster to invalidate all the code */
644 current_tb_not_found = is_cpu_write_access;
645 current_tb_modified = 0;
646 current_tb = NULL; /* avoid warning */
647 current_pc = 0; /* avoid warning */
648 current_cs_base = 0; /* avoid warning */
649 current_flags = 0; /* avoid warning */
650 tb = p->first_tb;
651 while (tb != NULL) {
652 n = (long)tb & 3;
653 tb = (TranslationBlock *)((long)tb & ~3);
654 tb_next = tb->page_next[n];
655 /* NOTE: this is subtle as a TB may span two physical pages */
656 if (n == 0) {
657 /* NOTE: tb_end may be after the end of the page, but
658 it is not a problem */
659 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
660 tb_end = tb_start + tb->size;
661 } else {
662 tb_start = tb->page_addr[1];
663 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
665 if (!(tb_end <= start || tb_start >= end)) {
666 #ifdef TARGET_HAS_PRECISE_SMC
667 if (current_tb_not_found) {
668 current_tb_not_found = 0;
669 current_tb = NULL;
670 if (env->mem_write_pc) {
671 /* now we have a real cpu fault */
672 current_tb = tb_find_pc(env->mem_write_pc);
675 if (current_tb == tb &&
676 !(current_tb->cflags & CF_SINGLE_INSN)) {
677 /* If we are modifying the current TB, we must stop
678 its execution. We could be more precise by checking
679 that the modification is after the current PC, but it
680 would require a specialized function to partially
681 restore the CPU state */
683 current_tb_modified = 1;
684 cpu_restore_state(current_tb, env,
685 env->mem_write_pc, NULL);
686 #if defined(TARGET_I386)
687 current_flags = env->hflags;
688 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
689 current_cs_base = (target_ulong)env->segs[R_CS].base;
690 current_pc = current_cs_base + env->eip;
691 #else
692 #error unsupported CPU
693 #endif
695 #endif /* TARGET_HAS_PRECISE_SMC */
696 /* we need to do that to handle the case where a signal
697 occurs while doing tb_phys_invalidate() */
698 saved_tb = NULL;
699 if (env) {
700 saved_tb = env->current_tb;
701 env->current_tb = NULL;
703 tb_phys_invalidate(tb, -1);
704 if (env) {
705 env->current_tb = saved_tb;
706 if (env->interrupt_request && env->current_tb)
707 cpu_interrupt(env, env->interrupt_request);
710 tb = tb_next;
712 #if !defined(CONFIG_USER_ONLY)
713 /* if no code remaining, no need to continue to use slow writes */
714 if (!p->first_tb) {
715 invalidate_page_bitmap(p);
716 if (is_cpu_write_access) {
717 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
720 #endif
721 #ifdef TARGET_HAS_PRECISE_SMC
722 if (current_tb_modified) {
723 /* we generate a block containing just the instruction
724 modifying the memory. It will ensure that it cannot modify
725 itself */
726 env->current_tb = NULL;
727 tb_gen_code(env, current_pc, current_cs_base, current_flags,
728 CF_SINGLE_INSN);
729 cpu_resume_from_signal(env, NULL);
731 #endif
734 /* len must be <= 8 and start must be a multiple of len */
735 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
737 PageDesc *p;
738 int offset, b;
739 #if 0
740 if (1) {
741 if (loglevel) {
742 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
743 cpu_single_env->mem_write_vaddr, len,
744 cpu_single_env->eip,
745 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
748 #endif
749 p = page_find(start >> TARGET_PAGE_BITS);
750 if (!p)
751 return;
752 if (p->code_bitmap) {
753 offset = start & ~TARGET_PAGE_MASK;
754 b = p->code_bitmap[offset >> 3] >> (offset & 7);
755 if (b & ((1 << len) - 1))
756 goto do_invalidate;
757 } else {
758 do_invalidate:
759 tb_invalidate_phys_page_range(start, start + len, 1);
763 #if !defined(CONFIG_SOFTMMU)
764 static void tb_invalidate_phys_page(target_ulong addr,
765 unsigned long pc, void *puc)
767 int n, current_flags, current_tb_modified;
768 target_ulong current_pc, current_cs_base;
769 PageDesc *p;
770 TranslationBlock *tb, *current_tb;
771 #ifdef TARGET_HAS_PRECISE_SMC
772 CPUState *env = cpu_single_env;
773 #endif
775 addr &= TARGET_PAGE_MASK;
776 p = page_find(addr >> TARGET_PAGE_BITS);
777 if (!p)
778 return;
779 tb = p->first_tb;
780 current_tb_modified = 0;
781 current_tb = NULL;
782 current_pc = 0; /* avoid warning */
783 current_cs_base = 0; /* avoid warning */
784 current_flags = 0; /* avoid warning */
785 #ifdef TARGET_HAS_PRECISE_SMC
786 if (tb && pc != 0) {
787 current_tb = tb_find_pc(pc);
789 #endif
790 while (tb != NULL) {
791 n = (long)tb & 3;
792 tb = (TranslationBlock *)((long)tb & ~3);
793 #ifdef TARGET_HAS_PRECISE_SMC
794 if (current_tb == tb &&
795 !(current_tb->cflags & CF_SINGLE_INSN)) {
796 /* If we are modifying the current TB, we must stop
797 its execution. We could be more precise by checking
798 that the modification is after the current PC, but it
799 would require a specialized function to partially
800 restore the CPU state */
802 current_tb_modified = 1;
803 cpu_restore_state(current_tb, env, pc, puc);
804 #if defined(TARGET_I386)
805 current_flags = env->hflags;
806 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
807 current_cs_base = (target_ulong)env->segs[R_CS].base;
808 current_pc = current_cs_base + env->eip;
809 #else
810 #error unsupported CPU
811 #endif
813 #endif /* TARGET_HAS_PRECISE_SMC */
814 tb_phys_invalidate(tb, addr);
815 tb = tb->page_next[n];
817 p->first_tb = NULL;
818 #ifdef TARGET_HAS_PRECISE_SMC
819 if (current_tb_modified) {
820 /* we generate a block containing just the instruction
821 modifying the memory. It will ensure that it cannot modify
822 itself */
823 env->current_tb = NULL;
824 tb_gen_code(env, current_pc, current_cs_base, current_flags,
825 CF_SINGLE_INSN);
826 cpu_resume_from_signal(env, puc);
828 #endif
830 #endif
832 /* add the tb in the target page and protect it if necessary */
833 static inline void tb_alloc_page(TranslationBlock *tb,
834 unsigned int n, target_ulong page_addr)
836 PageDesc *p;
837 TranslationBlock *last_first_tb;
839 tb->page_addr[n] = page_addr;
840 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
841 tb->page_next[n] = p->first_tb;
842 last_first_tb = p->first_tb;
843 p->first_tb = (TranslationBlock *)((long)tb | n);
844 invalidate_page_bitmap(p);
846 #if defined(TARGET_HAS_SMC) || 1
848 #if defined(CONFIG_USER_ONLY)
849 if (p->flags & PAGE_WRITE) {
850 target_ulong addr;
851 PageDesc *p2;
852 int prot;
854 /* force the host page as non writable (writes will have a
855 page fault + mprotect overhead) */
856 page_addr &= qemu_host_page_mask;
857 prot = 0;
858 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
859 addr += TARGET_PAGE_SIZE) {
861 p2 = page_find (addr >> TARGET_PAGE_BITS);
862 if (!p2)
863 continue;
864 prot |= p2->flags;
865 p2->flags &= ~PAGE_WRITE;
866 page_get_flags(addr);
868 mprotect(g2h(page_addr), qemu_host_page_size,
869 (prot & PAGE_BITS) & ~PAGE_WRITE);
870 #ifdef DEBUG_TB_INVALIDATE
871 printf("protecting code page: 0x%08lx\n",
872 page_addr);
873 #endif
875 #else
876 /* if some code is already present, then the pages are already
877 protected. So we handle the case where only the first TB is
878 allocated in a physical page */
879 if (!last_first_tb) {
880 tlb_protect_code(page_addr);
882 #endif
884 #endif /* TARGET_HAS_SMC */
887 /* Allocate a new translation block. Flush the translation buffer if
888 too many translation blocks or too much generated code. */
889 TranslationBlock *tb_alloc(target_ulong pc)
891 TranslationBlock *tb;
893 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
894 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
895 return NULL;
896 tb = &tbs[nb_tbs++];
897 tb->pc = pc;
898 tb->cflags = 0;
899 return tb;
902 /* add a new TB and link it to the physical page tables. phys_page2 is
903 (-1) to indicate that only one page contains the TB. */
904 void tb_link_phys(TranslationBlock *tb,
905 target_ulong phys_pc, target_ulong phys_page2)
907 unsigned int h;
908 TranslationBlock **ptb;
910 /* add in the physical hash table */
911 h = tb_phys_hash_func(phys_pc);
912 ptb = &tb_phys_hash[h];
913 tb->phys_hash_next = *ptb;
914 *ptb = tb;
916 /* add in the page list */
917 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
918 if (phys_page2 != -1)
919 tb_alloc_page(tb, 1, phys_page2);
920 else
921 tb->page_addr[1] = -1;
923 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
924 tb->jmp_next[0] = NULL;
925 tb->jmp_next[1] = NULL;
926 #ifdef USE_CODE_COPY
927 tb->cflags &= ~CF_FP_USED;
928 if (tb->cflags & CF_TB_FP_USED)
929 tb->cflags |= CF_FP_USED;
930 #endif
932 /* init original jump addresses */
933 if (tb->tb_next_offset[0] != 0xffff)
934 tb_reset_jump(tb, 0);
935 if (tb->tb_next_offset[1] != 0xffff)
936 tb_reset_jump(tb, 1);
938 #ifdef DEBUG_TB_CHECK
939 tb_page_check();
940 #endif
943 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
944 tb[1].tc_ptr. Return NULL if not found */
945 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
947 int m_min, m_max, m;
948 unsigned long v;
949 TranslationBlock *tb;
951 if (nb_tbs <= 0)
952 return NULL;
953 if (tc_ptr < (unsigned long)code_gen_buffer ||
954 tc_ptr >= (unsigned long)code_gen_ptr)
955 return NULL;
956 /* binary search (cf Knuth) */
957 m_min = 0;
958 m_max = nb_tbs - 1;
959 while (m_min <= m_max) {
960 m = (m_min + m_max) >> 1;
961 tb = &tbs[m];
962 v = (unsigned long)tb->tc_ptr;
963 if (v == tc_ptr)
964 return tb;
965 else if (tc_ptr < v) {
966 m_max = m - 1;
967 } else {
968 m_min = m + 1;
971 return &tbs[m_max];
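/* Illustrative note (not part of the original file): tb_find_pc() maps a
   host PC inside the code generation buffer back to the TB that produced
   it, so the guest state can be rebuilt after a fault, as the SMC handling
   above does:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc, puc);
*/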
974 static void tb_reset_jump_recursive(TranslationBlock *tb);
976 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
978 TranslationBlock *tb1, *tb_next, **ptb;
979 unsigned int n1;
981 tb1 = tb->jmp_next[n];
982 if (tb1 != NULL) {
983 /* find head of list */
984 for(;;) {
985 n1 = (long)tb1 & 3;
986 tb1 = (TranslationBlock *)((long)tb1 & ~3);
987 if (n1 == 2)
988 break;
989 tb1 = tb1->jmp_next[n1];
991         /* we are now sure that tb jumps to tb1 */
992 tb_next = tb1;
994 /* remove tb from the jmp_first list */
995 ptb = &tb_next->jmp_first;
996 for(;;) {
997 tb1 = *ptb;
998 n1 = (long)tb1 & 3;
999 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1000 if (n1 == n && tb1 == tb)
1001 break;
1002 ptb = &tb1->jmp_next[n1];
1004 *ptb = tb->jmp_next[n];
1005 tb->jmp_next[n] = NULL;
1007 /* suppress the jump to next tb in generated code */
1008 tb_reset_jump(tb, n);
1010 /* suppress jumps in the tb on which we could have jumped */
1011 tb_reset_jump_recursive(tb_next);
1015 static void tb_reset_jump_recursive(TranslationBlock *tb)
1017 tb_reset_jump_recursive2(tb, 0);
1018 tb_reset_jump_recursive2(tb, 1);
1021 #if defined(TARGET_HAS_ICE)
1022 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1024 target_ulong addr, pd;
1025 ram_addr_t ram_addr;
1026 PhysPageDesc *p;
1028 addr = cpu_get_phys_page_debug(env, pc);
1029 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1030 if (!p) {
1031 pd = IO_MEM_UNASSIGNED;
1032 } else {
1033 pd = p->phys_offset;
1035 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1036 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1038 #endif
1040 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1041 breakpoint is reached */
1042 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1044 #if defined(TARGET_HAS_ICE)
1045 int i;
1047 for(i = 0; i < env->nb_breakpoints; i++) {
1048 if (env->breakpoints[i] == pc)
1049 return 0;
1052 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1053 return -1;
1054 env->breakpoints[env->nb_breakpoints++] = pc;
1056 #ifdef USE_KVM
1057 if (kvm_allowed)
1058 kvm_update_debugger(env);
1059 #endif
1061 breakpoint_invalidate(env, pc);
1062 return 0;
1063 #else
1064 return -1;
1065 #endif
1068 /* remove a breakpoint */
1069 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1071 #if defined(TARGET_HAS_ICE)
1072 int i;
1073 for(i = 0; i < env->nb_breakpoints; i++) {
1074 if (env->breakpoints[i] == pc)
1075 goto found;
1077 return -1;
1078 found:
1079 env->nb_breakpoints--;
1080 if (i < env->nb_breakpoints)
1081 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1083 #ifdef USE_KVM
1084 if (kvm_allowed)
1085 kvm_update_debugger(env);
1086 #endif
1088 breakpoint_invalidate(env, pc);
1089 return 0;
1090 #else
1091 return -1;
1092 #endif
1095 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1096 CPU loop after each instruction */
1097 void cpu_single_step(CPUState *env, int enabled)
1099 #if defined(TARGET_HAS_ICE)
1100 if (env->singlestep_enabled != enabled) {
1101 env->singlestep_enabled = enabled;
1102         /* must flush all the translated code to avoid inconsistencies */
1103 /* XXX: only flush what is necessary */
1104 tb_flush(env);
1106 #ifdef USE_KVM
1107 if (kvm_allowed)
1108 kvm_update_debugger(env);
1109 #endif
1110 #endif
1113 /* enable or disable low level logging */
1114 void cpu_set_log(int log_flags)
1116 loglevel = log_flags;
1117 if (loglevel && !logfile) {
1118 logfile = fopen(logfilename, "w");
1119 if (!logfile) {
1120 perror(logfilename);
1121 _exit(1);
1123 #if !defined(CONFIG_SOFTMMU)
1124         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1126 static uint8_t logfile_buf[4096];
1127 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1129 #else
1130 setvbuf(logfile, NULL, _IOLBF, 0);
1131 #endif
1135 void cpu_set_log_filename(const char *filename)
1137 logfilename = strdup(filename);
1140 /* mask must never be zero, except for A20 change call */
1141 void cpu_interrupt(CPUState *env, int mask)
1143 TranslationBlock *tb;
1144 static int interrupt_lock;
1146 env->interrupt_request |= mask;
1147 #ifdef USE_KVM
1148 if (kvm_allowed)
1149 kvm_update_interrupt_request(env);
1150 #endif
1151 /* if the cpu is currently executing code, we must unlink it and
1152 all the potentially executing TB */
1153 tb = env->current_tb;
1154 if (tb && !testandset(&interrupt_lock)) {
1155 env->current_tb = NULL;
1156 tb_reset_jump_recursive(tb);
1157 interrupt_lock = 0;
1161 void cpu_reset_interrupt(CPUState *env, int mask)
1163 env->interrupt_request &= ~mask;
1166 CPULogItem cpu_log_items[] = {
1167 { CPU_LOG_TB_OUT_ASM, "out_asm",
1168 "show generated host assembly code for each compiled TB" },
1169 { CPU_LOG_TB_IN_ASM, "in_asm",
1170 "show target assembly code for each compiled TB" },
1171 { CPU_LOG_TB_OP, "op",
1172 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1173 #ifdef TARGET_I386
1174 { CPU_LOG_TB_OP_OPT, "op_opt",
1175 "show micro ops after optimization for each compiled TB" },
1176 #endif
1177 { CPU_LOG_INT, "int",
1178 "show interrupts/exceptions in short format" },
1179 { CPU_LOG_EXEC, "exec",
1180 "show trace before each executed TB (lots of logs)" },
1181 { CPU_LOG_TB_CPU, "cpu",
1182       "show CPU state before block translation" },
1183 #ifdef TARGET_I386
1184 { CPU_LOG_PCALL, "pcall",
1185 "show protected mode far calls/returns/exceptions" },
1186 #endif
1187 #ifdef DEBUG_IOPORT
1188 { CPU_LOG_IOPORT, "ioport",
1189       "show all I/O port accesses" },
1190 #endif
1191 { 0, NULL, NULL },
1194 static int cmp1(const char *s1, int n, const char *s2)
1196 if (strlen(s2) != n)
1197 return 0;
1198 return memcmp(s1, s2, n) == 0;
1201 /* takes a comma-separated list of log masks. Returns 0 on error. */
1202 int cpu_str_to_log_mask(const char *str)
1204 CPULogItem *item;
1205 int mask;
1206 const char *p, *p1;
1208 p = str;
1209 mask = 0;
1210 for(;;) {
1211 p1 = strchr(p, ',');
1212 if (!p1)
1213 p1 = p + strlen(p);
1214 if(cmp1(p,p1-p,"all")) {
1215 for(item = cpu_log_items; item->mask != 0; item++) {
1216 mask |= item->mask;
1218 } else {
1219 for(item = cpu_log_items; item->mask != 0; item++) {
1220 if (cmp1(p, p1 - p, item->name))
1221 goto found;
1223 return 0;
1225 found:
1226 mask |= item->mask;
1227 if (*p1 != ',')
1228 break;
1229 p = p1 + 1;
1231 return mask;
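/* Usage example (not part of the original file): parsing a -d option
   string against the CPULogItem table above:

       int mask = cpu_str_to_log_mask("in_asm,exec");
       // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC); 0 means an unknown name
       cpu_set_log(mask);
*/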
1234 void cpu_abort(CPUState *env, const char *fmt, ...)
1236 va_list ap;
1238 va_start(ap, fmt);
1239 fprintf(stderr, "qemu: fatal: ");
1240 vfprintf(stderr, fmt, ap);
1241 fprintf(stderr, "\n");
1242 #ifdef TARGET_I386
1243 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1244 #else
1245 cpu_dump_state(env, stderr, fprintf, 0);
1246 #endif
1247 va_end(ap);
1248 abort();
1251 #if !defined(CONFIG_USER_ONLY)
1253 /* NOTE: if flush_global is true, also flush global entries (not
1254 implemented yet) */
1255 void tlb_flush(CPUState *env, int flush_global)
1257 int i;
1259 #if defined(DEBUG_TLB)
1260 printf("tlb_flush:\n");
1261 #endif
1262 /* must reset current TB so that interrupts cannot modify the
1263 links while we are modifying them */
1264 env->current_tb = NULL;
1266 for(i = 0; i < CPU_TLB_SIZE; i++) {
1267 env->tlb_table[0][i].addr_read = -1;
1268 env->tlb_table[0][i].addr_write = -1;
1269 env->tlb_table[0][i].addr_code = -1;
1270 env->tlb_table[1][i].addr_read = -1;
1271 env->tlb_table[1][i].addr_write = -1;
1272 env->tlb_table[1][i].addr_code = -1;
1275 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1277 #if !defined(CONFIG_SOFTMMU)
1278 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1279 #endif
1280 #ifdef USE_KQEMU
1281 if (env->kqemu_enabled) {
1282 kqemu_flush(env, flush_global);
1284 #endif
1285 tlb_flush_count++;
1288 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1290 if (addr == (tlb_entry->addr_read &
1291 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1292 addr == (tlb_entry->addr_write &
1293 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1294 addr == (tlb_entry->addr_code &
1295 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1296 tlb_entry->addr_read = -1;
1297 tlb_entry->addr_write = -1;
1298 tlb_entry->addr_code = -1;
1302 void tlb_flush_page(CPUState *env, target_ulong addr)
1304 int i;
1305 TranslationBlock *tb;
1307 #if defined(DEBUG_TLB)
1308 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1309 #endif
1310 /* must reset current TB so that interrupts cannot modify the
1311 links while we are modifying them */
1312 env->current_tb = NULL;
1314 addr &= TARGET_PAGE_MASK;
1315 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1316 tlb_flush_entry(&env->tlb_table[0][i], addr);
1317 tlb_flush_entry(&env->tlb_table[1][i], addr);
1319 /* Discard jump cache entries for any tb which might potentially
1320 overlap the flushed page. */
1321 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1322 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1324 i = tb_jmp_cache_hash_page(addr);
1325 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1327 #if !defined(CONFIG_SOFTMMU)
1328 if (addr < MMAP_AREA_END)
1329 munmap((void *)addr, TARGET_PAGE_SIZE);
1330 #endif
1331 #ifdef USE_KQEMU
1332 if (env->kqemu_enabled) {
1333 kqemu_flush_page(env, addr);
1335 #endif
1338 /* update the TLBs so that writes to code in the virtual page 'addr'
1339 can be detected */
1340 static void tlb_protect_code(ram_addr_t ram_addr)
1342 cpu_physical_memory_reset_dirty(ram_addr,
1343 ram_addr + TARGET_PAGE_SIZE,
1344 CODE_DIRTY_FLAG);
1347 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1348 tested for self modifying code */
1349 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1350 target_ulong vaddr)
1352 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1355 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1356 unsigned long start, unsigned long length)
1358 unsigned long addr;
1359 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1360 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1361 if ((addr - start) < length) {
1362 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1367 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1368 int dirty_flags)
1370 CPUState *env;
1371 unsigned long length, start1;
1372 int i, mask, len;
1373 uint8_t *p;
1375 start &= TARGET_PAGE_MASK;
1376 end = TARGET_PAGE_ALIGN(end);
1378 length = end - start;
1379 if (length == 0)
1380 return;
1381 len = length >> TARGET_PAGE_BITS;
1382 #ifdef USE_KQEMU
1383 /* XXX: should not depend on cpu context */
1384 env = first_cpu;
1385 if (env->kqemu_enabled) {
1386 ram_addr_t addr;
1387 addr = start;
1388 for(i = 0; i < len; i++) {
1389 kqemu_set_notdirty(env, addr);
1390 addr += TARGET_PAGE_SIZE;
1393 #endif
1394 mask = ~dirty_flags;
1395 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1396 for(i = 0; i < len; i++)
1397 p[i] &= mask;
1399 /* we modify the TLB cache so that the dirty bit will be set again
1400 when accessing the range */
1401 start1 = start + (unsigned long)phys_ram_base;
1402 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1403 for(i = 0; i < CPU_TLB_SIZE; i++)
1404 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1405 for(i = 0; i < CPU_TLB_SIZE; i++)
1406 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1409 #if !defined(CONFIG_SOFTMMU)
1410 /* XXX: this is expensive */
1412 VirtPageDesc *p;
1413 int j;
1414 target_ulong addr;
1416 for(i = 0; i < L1_SIZE; i++) {
1417 p = l1_virt_map[i];
1418 if (p) {
1419 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1420 for(j = 0; j < L2_SIZE; j++) {
1421 if (p->valid_tag == virt_valid_tag &&
1422 p->phys_addr >= start && p->phys_addr < end &&
1423 (p->prot & PROT_WRITE)) {
1424 if (addr < MMAP_AREA_END) {
1425 mprotect((void *)addr, TARGET_PAGE_SIZE,
1426 p->prot & ~PROT_WRITE);
1429 addr += TARGET_PAGE_SIZE;
1430 p++;
1435 #endif
1438 int cpu_physical_memory_set_dirty_tracking(int enable)
1440 int r=0;
1442 #ifdef USE_KVM
1443 r = kvm_physical_memory_set_dirty_tracking(enable);
1444 #endif
1445 in_migration = enable;
1446 return r;
1449 int cpu_physical_memory_get_dirty_tracking(void)
1451 return in_migration;
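/* Illustrative sketch (not part of the original file): the dirty bitmap is
   typically consumed by scanning a physical range and then clearing the
   bits that have been handled.  cpu_physical_memory_get_dirty() and the
   VGA_DIRTY_FLAG selector are assumed to come from cpu-all.h:

       for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
           if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
               // ... redraw / resend this page ...
           }
       }
       cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);

   cpu_physical_memory_set_dirty_tracking() above additionally arms KVM's
   own dirty logging when running with the kernel module. */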
1454 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1456 ram_addr_t ram_addr;
1458 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1459 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1460 tlb_entry->addend - (unsigned long)phys_ram_base;
1461 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1462 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1467 /* update the TLB according to the current state of the dirty bits */
1468 void cpu_tlb_update_dirty(CPUState *env)
1470 int i;
1471 for(i = 0; i < CPU_TLB_SIZE; i++)
1472 tlb_update_dirty(&env->tlb_table[0][i]);
1473 for(i = 0; i < CPU_TLB_SIZE; i++)
1474 tlb_update_dirty(&env->tlb_table[1][i]);
1477 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1478 unsigned long start)
1480 unsigned long addr;
1481 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1482 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1483 if (addr == start) {
1484 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1489 /* update the TLB corresponding to virtual page vaddr and phys addr
1490 addr so that it is no longer dirty */
1491 static inline void tlb_set_dirty(CPUState *env,
1492 unsigned long addr, target_ulong vaddr)
1494 int i;
1496 addr &= TARGET_PAGE_MASK;
1497 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1498 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1499 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1502 /* add a new TLB entry. At most one entry for a given virtual address
1503 is permitted. Return 0 if OK or 2 if the page could not be mapped
1504 (can only happen in non SOFTMMU mode for I/O pages or pages
1505 conflicting with the host address space). */
1506 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1507 target_phys_addr_t paddr, int prot,
1508 int is_user, int is_softmmu)
1510 PhysPageDesc *p;
1511 unsigned long pd;
1512 unsigned int index;
1513 target_ulong address;
1514 target_phys_addr_t addend;
1515 int ret;
1516 CPUTLBEntry *te;
1518 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1519 if (!p) {
1520 pd = IO_MEM_UNASSIGNED;
1521 } else {
1522 pd = p->phys_offset;
1524 #if defined(DEBUG_TLB)
1525 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1526 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1527 #endif
1529 ret = 0;
1530 #if !defined(CONFIG_SOFTMMU)
1531 if (is_softmmu)
1532 #endif
1534 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1535 /* IO memory case */
1536 address = vaddr | pd;
1537 addend = paddr;
1538 } else {
1539 /* standard memory */
1540 address = vaddr;
1541 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1544 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1545 addend -= vaddr;
1546 te = &env->tlb_table[is_user][index];
1547 te->addend = addend;
1548 if (prot & PAGE_READ) {
1549 te->addr_read = address;
1550 } else {
1551 te->addr_read = -1;
1553 if (prot & PAGE_EXEC) {
1554 te->addr_code = address;
1555 } else {
1556 te->addr_code = -1;
1558 if (prot & PAGE_WRITE) {
1559 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1560 (pd & IO_MEM_ROMD)) {
1561 /* write access calls the I/O callback */
1562 te->addr_write = vaddr |
1563 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1564 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1565 !cpu_physical_memory_is_dirty(pd)) {
1566 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1567 } else {
1568 te->addr_write = address;
1570 } else {
1571 te->addr_write = -1;
1574 #if !defined(CONFIG_SOFTMMU)
1575 else {
1576 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1577 /* IO access: no mapping is done as it will be handled by the
1578 soft MMU */
1579 if (!(env->hflags & HF_SOFTMMU_MASK))
1580 ret = 2;
1581 } else {
1582 void *map_addr;
1584 if (vaddr >= MMAP_AREA_END) {
1585 ret = 2;
1586 } else {
1587 if (prot & PROT_WRITE) {
1588 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1589 #if defined(TARGET_HAS_SMC) || 1
1590 first_tb ||
1591 #endif
1592 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1593 !cpu_physical_memory_is_dirty(pd))) {
1594                 /* ROM: we behave as if code were inside */
1595 /* if code is present, we only map as read only and save the
1596 original mapping */
1597 VirtPageDesc *vp;
1599 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1600 vp->phys_addr = pd;
1601 vp->prot = prot;
1602 vp->valid_tag = virt_valid_tag;
1603 prot &= ~PAGE_WRITE;
1606 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1607 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1608 if (map_addr == MAP_FAILED) {
1609                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1610 paddr, vaddr);
1615 #endif
1616 return ret;
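/* Illustrative note (not part of the original file): for a RAM page the
   TLB entry's 'addend' is chosen so that the softmmu fast path can turn a
   guest virtual address directly into a host pointer:

       addend   = ((unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK)) - vaddr;
       host_ptr = (uint8_t *)(guest_vaddr + te->addend);

   For I/O pages the low bits of addr_read/addr_write/addr_code carry the
   io_index instead, so accesses are dispatched through the io_mem_read and
   io_mem_write tables. */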
1619 /* called from signal handler: invalidate the code and unprotect the
1620    page. Return TRUE if the fault was successfully handled. */
1621 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1623 #if !defined(CONFIG_SOFTMMU)
1624 VirtPageDesc *vp;
1626 #if defined(DEBUG_TLB)
1627 printf("page_unprotect: addr=0x%08x\n", addr);
1628 #endif
1629 addr &= TARGET_PAGE_MASK;
1631 /* if it is not mapped, no need to worry here */
1632 if (addr >= MMAP_AREA_END)
1633 return 0;
1634 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1635 if (!vp)
1636 return 0;
1637 /* NOTE: in this case, validate_tag is _not_ tested as it
1638 validates only the code TLB */
1639 if (vp->valid_tag != virt_valid_tag)
1640 return 0;
1641 if (!(vp->prot & PAGE_WRITE))
1642 return 0;
1643 #if defined(DEBUG_TLB)
1644 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1645 addr, vp->phys_addr, vp->prot);
1646 #endif
1647 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1648 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1649 (unsigned long)addr, vp->prot);
1650 /* set the dirty bit */
1651 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1652 /* flush the code inside */
1653 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1654 return 1;
1655 #else
1656 return 0;
1657 #endif
1660 #else
1662 void tlb_flush(CPUState *env, int flush_global)
1666 void tlb_flush_page(CPUState *env, target_ulong addr)
1670 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1671 target_phys_addr_t paddr, int prot,
1672 int is_user, int is_softmmu)
1674 return 0;
1677 /* dump memory mappings */
1678 void page_dump(FILE *f)
1680 unsigned long start, end;
1681 int i, j, prot, prot1;
1682 PageDesc *p;
1684 fprintf(f, "%-8s %-8s %-8s %s\n",
1685 "start", "end", "size", "prot");
1686 start = -1;
1687 end = -1;
1688 prot = 0;
1689 for(i = 0; i <= L1_SIZE; i++) {
1690 if (i < L1_SIZE)
1691 p = l1_map[i];
1692 else
1693 p = NULL;
1694 for(j = 0;j < L2_SIZE; j++) {
1695 if (!p)
1696 prot1 = 0;
1697 else
1698 prot1 = p[j].flags;
1699 if (prot1 != prot) {
1700 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1701 if (start != -1) {
1702 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1703 start, end, end - start,
1704 prot & PAGE_READ ? 'r' : '-',
1705 prot & PAGE_WRITE ? 'w' : '-',
1706 prot & PAGE_EXEC ? 'x' : '-');
1708 if (prot1 != 0)
1709 start = end;
1710 else
1711 start = -1;
1712 prot = prot1;
1714 if (!p)
1715 break;
1720 int page_get_flags(target_ulong address)
1722 PageDesc *p;
1724 p = page_find(address >> TARGET_PAGE_BITS);
1725 if (!p)
1726 return 0;
1727 return p->flags;
1730 /* modify the flags of a page and invalidate the code if
1731    necessary. The flag PAGE_WRITE_ORG is set automatically
1732 depending on PAGE_WRITE */
1733 void page_set_flags(target_ulong start, target_ulong end, int flags)
1735 PageDesc *p;
1736 target_ulong addr;
1738 start = start & TARGET_PAGE_MASK;
1739 end = TARGET_PAGE_ALIGN(end);
1740 if (flags & PAGE_WRITE)
1741 flags |= PAGE_WRITE_ORG;
1742 spin_lock(&tb_lock);
1743 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1744 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1745 /* if the write protection is set, then we invalidate the code
1746 inside */
1747 if (!(p->flags & PAGE_WRITE) &&
1748 (flags & PAGE_WRITE) &&
1749 p->first_tb) {
1750 tb_invalidate_phys_page(addr, 0, NULL);
1752 p->flags = flags;
1754 spin_unlock(&tb_lock);
1757 /* called from signal handler: invalidate the code and unprotect the
1758    page. Return TRUE if the fault was successfully handled. */
1759 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1761 unsigned int page_index, prot, pindex;
1762 PageDesc *p, *p1;
1763 target_ulong host_start, host_end, addr;
1765 host_start = address & qemu_host_page_mask;
1766 page_index = host_start >> TARGET_PAGE_BITS;
1767 p1 = page_find(page_index);
1768 if (!p1)
1769 return 0;
1770 host_end = host_start + qemu_host_page_size;
1771 p = p1;
1772 prot = 0;
1773 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1774 prot |= p->flags;
1775 p++;
1777 /* if the page was really writable, then we change its
1778 protection back to writable */
1779 if (prot & PAGE_WRITE_ORG) {
1780 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1781 if (!(p1[pindex].flags & PAGE_WRITE)) {
1782 mprotect((void *)g2h(host_start), qemu_host_page_size,
1783 (prot & PAGE_BITS) | PAGE_WRITE);
1784 p1[pindex].flags |= PAGE_WRITE;
1785 /* and since the content will be modified, we must invalidate
1786 the corresponding translated code. */
1787 tb_invalidate_phys_page(address, pc, puc);
1788 #ifdef DEBUG_TB_CHECK
1789 tb_invalidate_check(address);
1790 #endif
1791 return 1;
1794 return 0;
1797 /* call this function when system calls directly modify a memory area */
1798 /* ??? This should be redundant now that we have lock_user. */
1799 void page_unprotect_range(target_ulong data, target_ulong data_size)
1801 target_ulong start, end, addr;
1803 start = data;
1804 end = start + data_size;
1805 start &= TARGET_PAGE_MASK;
1806 end = TARGET_PAGE_ALIGN(end);
1807 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1808 page_unprotect(addr, 0, NULL);
1812 static inline void tlb_set_dirty(CPUState *env,
1813 unsigned long addr, target_ulong vaddr)
1816 #endif /* defined(CONFIG_USER_ONLY) */
1818 /* register physical memory. 'size' must be a multiple of the target
1819 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1820    I/O memory page */
1821 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1822 unsigned long size,
1823 unsigned long phys_offset)
1825 target_phys_addr_t addr, end_addr;
1826 PhysPageDesc *p;
1827 CPUState *env;
1829 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1830 end_addr = start_addr + size;
1831 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1832 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1833 p->phys_offset = phys_offset;
1834 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1835 (phys_offset & IO_MEM_ROMD))
1836 phys_offset += TARGET_PAGE_SIZE;
1839 /* since each CPU stores ram addresses in its TLB cache, we must
1840 reset the modified entries */
1842 /* XXX: slow! */
1842 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1843 tlb_flush(env, 1);
1847 /* XXX: temporary until new memory mapping API */
1848 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1850 PhysPageDesc *p;
1852 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1853 if (!p)
1854 return IO_MEM_UNASSIGNED;
1855 return p->phys_offset;
1858 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1860 #ifdef DEBUG_UNASSIGNED
1861 printf("Unassigned mem read 0x%08x\n", (int)addr);
1862 #endif
1863 return 0;
1866 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1868 #ifdef DEBUG_UNASSIGNED
1869 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1870 #endif
1873 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1874 unassigned_mem_readb,
1875 unassigned_mem_readb,
1876 unassigned_mem_readb,
1879 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1880 unassigned_mem_writeb,
1881 unassigned_mem_writeb,
1882 unassigned_mem_writeb,
1885 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1887 unsigned long ram_addr;
1888 int dirty_flags;
1889 ram_addr = addr - (unsigned long)phys_ram_base;
1890 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1891 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1892 #if !defined(CONFIG_USER_ONLY)
1893 tb_invalidate_phys_page_fast(ram_addr, 1);
1894 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1895 #endif
1897 stb_p((uint8_t *)(long)addr, val);
1898 #ifdef USE_KQEMU
1899 if (cpu_single_env->kqemu_enabled &&
1900 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1901 kqemu_modify_page(cpu_single_env, ram_addr);
1902 #endif
1903 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1904 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1905 /* we remove the notdirty callback only if the code has been
1906 flushed */
1907 if (dirty_flags == 0xff)
1908 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1911 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1913 unsigned long ram_addr;
1914 int dirty_flags;
1915 ram_addr = addr - (unsigned long)phys_ram_base;
1916 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1917 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1918 #if !defined(CONFIG_USER_ONLY)
1919 tb_invalidate_phys_page_fast(ram_addr, 2);
1920 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1921 #endif
1923 stw_p((uint8_t *)(long)addr, val);
1924 #ifdef USE_KQEMU
1925 if (cpu_single_env->kqemu_enabled &&
1926 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1927 kqemu_modify_page(cpu_single_env, ram_addr);
1928 #endif
1929 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1930 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1931 /* we remove the notdirty callback only if the code has been
1932 flushed */
1933 if (dirty_flags == 0xff)
1934 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1937 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1939 unsigned long ram_addr;
1940 int dirty_flags;
1941 ram_addr = addr - (unsigned long)phys_ram_base;
1942 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1943 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1944 #if !defined(CONFIG_USER_ONLY)
1945 tb_invalidate_phys_page_fast(ram_addr, 4);
1946 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1947 #endif
1949 stl_p((uint8_t *)(long)addr, val);
1950 #ifdef USE_KQEMU
1951 if (cpu_single_env->kqemu_enabled &&
1952 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1953 kqemu_modify_page(cpu_single_env, ram_addr);
1954 #endif
1955 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1956 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1957 /* we remove the notdirty callback only if the code has been
1958 flushed */
1959 if (dirty_flags == 0xff)
1960 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1963 static CPUReadMemoryFunc *error_mem_read[3] = {
1964 NULL, /* never used */
1965 NULL, /* never used */
1966 NULL, /* never used */
1969 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1970 notdirty_mem_writeb,
1971 notdirty_mem_writew,
1972 notdirty_mem_writel,
1975 static void io_mem_init(void)
1977 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1978 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1979 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1980 io_mem_nb = 5;
1982 /* alloc dirty bits array */
1983 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1984 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1987 /* mem_read and mem_write are arrays of functions containing the
1988 function to access byte (index 0), word (index 1) and dword (index
1989    2). All functions must be supplied. If io_index is non-zero, the
1990    corresponding I/O zone is modified. If it is zero, a new I/O zone is
1991    allocated. The return value can be used with
1992    cpu_register_physical_memory(). (-1) is returned on error. */
1993 int cpu_register_io_memory(int io_index,
1994 CPUReadMemoryFunc **mem_read,
1995 CPUWriteMemoryFunc **mem_write,
1996 void *opaque)
1998 int i;
2000 if (io_index <= 0) {
2001 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2002 return -1;
2003 io_index = io_mem_nb++;
2004 } else {
2005 if (io_index >= IO_MEM_NB_ENTRIES)
2006 return -1;
2009 for(i = 0;i < 3; i++) {
2010 io_mem_read[io_index][i] = mem_read[i];
2011 io_mem_write[io_index][i] = mem_write[i];
2013 io_mem_opaque[io_index] = opaque;
2014 return io_index << IO_MEM_SHIFT;
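/* Usage sketch (not part of the original file): a device model registers
   its byte/word/long handlers and then maps them into the physical address
   space.  All 'my_*' names, MY_MMIO_BASE and the opaque 's' are
   hypothetical:

       static CPUReadMemoryFunc *my_mmio_read[3] = {
           my_read_b, my_read_w, my_read_l,
       };
       static CPUWriteMemoryFunc *my_mmio_write[3] = {
           my_write_b, my_write_w, my_write_l,
       };

       int io = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, s);
       cpu_register_physical_memory(MY_MMIO_BASE, 0x1000, io);
*/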
2017 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2019 return io_mem_write[io_index >> IO_MEM_SHIFT];
2022 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2024 return io_mem_read[io_index >> IO_MEM_SHIFT];
2027 /* physical memory access (slow version, mainly for debug) */
2028 #if defined(CONFIG_USER_ONLY)
2029 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2030 int len, int is_write)
2032 int l, flags;
2033 target_ulong page;
2034 void * p;
2036 while (len > 0) {
2037 page = addr & TARGET_PAGE_MASK;
2038 l = (page + TARGET_PAGE_SIZE) - addr;
2039 if (l > len)
2040 l = len;
2041 flags = page_get_flags(page);
2042 if (!(flags & PAGE_VALID))
2043 return;
2044 if (is_write) {
2045 if (!(flags & PAGE_WRITE))
2046 return;
2047 p = lock_user(addr, len, 0);
2048 memcpy(p, buf, len);
2049 unlock_user(p, addr, len);
2050 } else {
2051 if (!(flags & PAGE_READ))
2052 return;
2053 p = lock_user(addr, len, 1);
2054 memcpy(buf, p, len);
2055 unlock_user(p, addr, 0);
2057 len -= l;
2058 buf += l;
2059 addr += l;
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

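/* Usage sketch (illustrative only, guarded out): device emulation and
   the monitor use this entry point for DMA-style copies to and from
   guest physical memory; the address and buffer below are made up. */
#if 0
uint8_t sector[512];
cpu_physical_memory_rw(0x100000, sector, sizeof(sector), 0);   /* read guest RAM */
/* ... modify sector ... */
cpu_physical_memory_rw(0x100000, sector, sizeof(sector), 1);   /* write it back */
/* cpu_physical_memory_read()/write() are convenience wrappers around
   the same call */
#endif
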
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

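/* Usage sketch (illustrative only, guarded out): a firmware loader can
   copy an image into a region registered as ROM; a plain write through
   cpu_physical_memory_rw() would be routed to unassigned_mem_write and
   silently dropped. The address and size below are hypothetical. */
#if 0
uint8_t bios_image[64 * 1024];                   /* already read from disk */
cpu_physical_memory_write_rom(0xfffe0000, bios_image, sizeof(bios_image));
#endif
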
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

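/* Usage sketch (illustrative only, guarded out): the ld*_phys helpers
   read guest physical memory and take care of target byte order;
   ldl_phys()/ldq_phys() require naturally aligned addresses, while the
   byte/word variants go through cpu_physical_memory_read(). The address
   below is made up. */
#if 0
target_phys_addr_t gpa = 0x1000;        /* hypothetical, 8-byte aligned */
uint32_t w = ldl_phys(gpa);             /* 32 bit load, must be aligned */
uint64_t q = ldq_phys(gpa);             /* 64 bit load, must be aligned */
uint32_t b = ldub_phys(gpa + 1);        /* byte load, no alignment needed */
#endif
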
#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

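/* Usage sketch (illustrative only, guarded out): a target page-table
   walker that sets accessed/dirty bits in a guest PTE can use
   stl_phys_notdirty() so that this emulator-generated store does not
   flag the page as modified. The pte_addr value and the 0x20/0x40 bit
   masks are hypothetical (x86-style A/D bits). */
#if 0
target_phys_addr_t pte_addr = 0x2000;
uint32_t pte = ldl_phys(pte_addr);
pte |= 0x20 | 0x40;                     /* e.g. set accessed + dirty bits */
stl_phys_notdirty(pte_addr, pte);       /* page stays clean, TBs stay valid */
#endif
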
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

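/* Usage sketch (illustrative only, guarded out): debugger paths (gdb
   stub, monitor memory dumps) use this helper because it translates the
   guest virtual address page by page before touching physical memory.
   The env pointer and the address below are placeholders. */
#if 0
uint8_t insn[4];
if (cpu_memory_rw_debug(env, 0x08048000, insn, sizeof(insn), 0) < 0) {
    /* no physical page mapped at that virtual address */
}
#endif
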
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif