kvm: revert r4386
[qemu-kvm/fedora.git] / exec.c
blob 4c18cf5cad281c91399d818026c1d6d95ee3fd0b
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
52 #undef DEBUG_TB_CHECK
53 #endif
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
67 #else
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
70 #endif
72 #ifdef USE_KVM
73 extern int kvm_allowed;
74 #endif
76 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
77 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78 int nb_tbs;
79 /* any access to the tbs or the page table must use this lock */
80 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
82 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
83 uint8_t *code_gen_ptr;
85 int phys_ram_size;
86 int phys_ram_fd;
87 uint8_t *phys_ram_base;
88 uint8_t *phys_ram_dirty;
89 uint8_t *bios_mem;
91 CPUState *first_cpu;
92 /* current CPU in the current thread. It is only valid inside
93 cpu_exec() */
94 CPUState *cpu_single_env;
96 typedef struct PageDesc {
97 /* list of TBs intersecting this ram page */
98 TranslationBlock *first_tb;
99 /* in order to optimize self modifying code, we count the number
100 of lookups we do to a given page to use a bitmap */
101 unsigned int code_write_count;
102 uint8_t *code_bitmap;
103 #if defined(CONFIG_USER_ONLY)
104 unsigned long flags;
105 #endif
106 } PageDesc;
108 typedef struct PhysPageDesc {
109 /* offset in host memory of the page + io_index in the low 12 bits */
110 uint32_t phys_offset;
111 } PhysPageDesc;
113 #define L2_BITS 10
114 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
116 #define L1_SIZE (1 << L1_BITS)
117 #define L2_SIZE (1 << L2_BITS)
119 static void io_mem_init(void);
121 unsigned long qemu_real_host_page_size;
122 unsigned long qemu_host_page_bits;
123 unsigned long qemu_host_page_size;
124 unsigned long qemu_host_page_mask;
126 /* XXX: for system emulation, it could just be an array */
127 static PageDesc *l1_map[L1_SIZE];
128 PhysPageDesc **l1_phys_map;
130 /* io memory support */
131 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
132 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
133 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
134 static int io_mem_nb;
136 /* log support */
137 char *logfilename = "/tmp/qemu.log";
138 FILE *logfile;
139 int loglevel;
141 /* statistics */
142 static int tlb_flush_count;
143 static int tb_flush_count;
144 static int tb_phys_invalidate_count;
146 static void page_init(void)
148 /* NOTE: we can always suppose that qemu_host_page_size >=
149 TARGET_PAGE_SIZE */
150 #ifdef _WIN32
152 SYSTEM_INFO system_info;
153 DWORD old_protect;
155 GetSystemInfo(&system_info);
156 qemu_real_host_page_size = system_info.dwPageSize;
158 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
159 PAGE_EXECUTE_READWRITE, &old_protect);
161 #else
162 qemu_real_host_page_size = getpagesize();
164 unsigned long start, end;
166 start = (unsigned long)code_gen_buffer;
167 start &= ~(qemu_real_host_page_size - 1);
169 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
170 end += qemu_real_host_page_size - 1;
171 end &= ~(qemu_real_host_page_size - 1);
173 mprotect((void *)start, end - start,
174 PROT_READ | PROT_WRITE | PROT_EXEC);
176 #endif
178 if (qemu_host_page_size == 0)
179 qemu_host_page_size = qemu_real_host_page_size;
180 if (qemu_host_page_size < TARGET_PAGE_SIZE)
181 qemu_host_page_size = TARGET_PAGE_SIZE;
182 qemu_host_page_bits = 0;
183 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
184 qemu_host_page_bits++;
185 qemu_host_page_mask = ~(qemu_host_page_size - 1);
186 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
187 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
190 static inline PageDesc *page_find_alloc(unsigned int index)
192 PageDesc **lp, *p;
194 lp = &l1_map[index >> L2_BITS];
195 p = *lp;
196 if (!p) {
197 /* allocate if not found */
198 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
199 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
200 *lp = p;
202 return p + (index & (L2_SIZE - 1));
205 static inline PageDesc *page_find(unsigned int index)
207 PageDesc *p;
209 p = l1_map[index >> L2_BITS];
210 if (!p)
211 return 0;
212 return p + (index & (L2_SIZE - 1));
215 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
217 void **lp, **p;
218 PhysPageDesc *pd;
220 p = (void **)l1_phys_map;
221 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
223 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
224 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
225 #endif
226 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
227 p = *lp;
228 if (!p) {
229 /* allocate if not found */
230 if (!alloc)
231 return NULL;
232 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
233 memset(p, 0, sizeof(void *) * L1_SIZE);
234 *lp = p;
236 #endif
237 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
238 pd = *lp;
239 if (!pd) {
240 int i;
241 /* allocate if not found */
242 if (!alloc)
243 return NULL;
244 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
245 *lp = pd;
246 for (i = 0; i < L2_SIZE; i++)
247 pd[i].phys_offset = IO_MEM_UNASSIGNED;
249 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
252 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
254 return phys_page_find_alloc(index, 0);
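/* Editor's note (illustration, not part of the original file): both
 * l1_map and l1_phys_map are two-level tables indexed by page number.
 * A page index is split into an L1 slot and an L2 slot, roughly:
 *
 *     l1_slot = index >> L2_BITS;        selects the second-level array
 *     l2_slot = index & (L2_SIZE - 1);   selects the entry inside it
 *
 * so with TARGET_PAGE_BITS == 12 and L2_BITS == 10, each second-level
 * array covers 1024 pages (4 MB of guest address space) and is only
 * allocated on first use by the *_find_alloc() helpers above. */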
257 #if !defined(CONFIG_USER_ONLY)
258 static void tlb_protect_code(ram_addr_t ram_addr);
259 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
260 target_ulong vaddr);
261 #endif
263 void cpu_exec_init(CPUState *env)
265 CPUState **penv;
266 int cpu_index;
268 if (!code_gen_ptr) {
269 code_gen_ptr = code_gen_buffer;
270 page_init();
271 io_mem_init();
273 env->next_cpu = NULL;
274 penv = &first_cpu;
275 cpu_index = 0;
276 while (*penv != NULL) {
277 penv = (CPUState **)&(*penv)->next_cpu;
278 cpu_index++;
280 env->cpu_index = cpu_index;
281 *penv = env;
284 static inline void invalidate_page_bitmap(PageDesc *p)
286 if (p->code_bitmap) {
287 qemu_free(p->code_bitmap);
288 p->code_bitmap = NULL;
290 p->code_write_count = 0;
293 /* set to NULL all the 'first_tb' fields in all PageDescs */
294 static void page_flush_tb(void)
296 int i, j;
297 PageDesc *p;
299 for(i = 0; i < L1_SIZE; i++) {
300 p = l1_map[i];
301 if (p) {
302 for(j = 0; j < L2_SIZE; j++) {
303 p->first_tb = NULL;
304 invalidate_page_bitmap(p);
305 p++;
311 /* flush all the translation blocks */
312 /* XXX: tb_flush is currently not thread safe */
313 void tb_flush(CPUState *env1)
315 CPUState *env;
316 #if defined(DEBUG_FLUSH)
317 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
318 code_gen_ptr - code_gen_buffer,
319 nb_tbs,
320 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
321 #endif
322 nb_tbs = 0;
324 for(env = first_cpu; env != NULL; env = env->next_cpu) {
325 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
328 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
329 page_flush_tb();
331 code_gen_ptr = code_gen_buffer;
332 /* XXX: flush processor icache at this point if cache flush is
333 expensive */
334 tb_flush_count++;
337 #ifdef DEBUG_TB_CHECK
339 static void tb_invalidate_check(unsigned long address)
341 TranslationBlock *tb;
342 int i;
343 address &= TARGET_PAGE_MASK;
344 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
345 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
346 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
347 address >= tb->pc + tb->size)) {
348 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
349 address, (long)tb->pc, tb->size);
355 /* verify that all the pages have correct rights for code */
356 static void tb_page_check(void)
358 TranslationBlock *tb;
359 int i, flags1, flags2;
361 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
362 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
363 flags1 = page_get_flags(tb->pc);
364 flags2 = page_get_flags(tb->pc + tb->size - 1);
365 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
366 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
367 (long)tb->pc, tb->size, flags1, flags2);
373 void tb_jmp_check(TranslationBlock *tb)
375 TranslationBlock *tb1;
376 unsigned int n1;
378 /* suppress any remaining jumps to this TB */
379 tb1 = tb->jmp_first;
380 for(;;) {
381 n1 = (long)tb1 & 3;
382 tb1 = (TranslationBlock *)((long)tb1 & ~3);
383 if (n1 == 2)
384 break;
385 tb1 = tb1->jmp_next[n1];
387 /* check end of list */
388 if (tb1 != tb) {
389 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
393 #endif
395 /* invalidate one TB */
396 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
397 int next_offset)
399 TranslationBlock *tb1;
400 for(;;) {
401 tb1 = *ptb;
402 if (tb1 == tb) {
403 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
404 break;
406 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
410 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
412 TranslationBlock *tb1;
413 unsigned int n1;
415 for(;;) {
416 tb1 = *ptb;
417 n1 = (long)tb1 & 3;
418 tb1 = (TranslationBlock *)((long)tb1 & ~3);
419 if (tb1 == tb) {
420 *ptb = tb1->page_next[n1];
421 break;
423 ptb = &tb1->page_next[n1];
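/* Editor's note (clarification, not part of the original file): the list
 * walkers above and below rely on a tagged-pointer convention.  The low
 * two bits of each stored TranslationBlock pointer encode which slot the
 * link came from: 0 or 1 selects page_next[n] / jmp_next[n] of the
 * previous TB, while the value 2 marks the head of the circular jump
 * list (see tb->jmp_first = (TranslationBlock *)((long)tb | 2) below).
 * Masking with ~3 recovers the real pointer. */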
427 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
429 TranslationBlock *tb1, **ptb;
430 unsigned int n1;
432 ptb = &tb->jmp_next[n];
433 tb1 = *ptb;
434 if (tb1) {
435 /* find tb(n) in circular list */
436 for(;;) {
437 tb1 = *ptb;
438 n1 = (long)tb1 & 3;
439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
440 if (n1 == n && tb1 == tb)
441 break;
442 if (n1 == 2) {
443 ptb = &tb1->jmp_first;
444 } else {
445 ptb = &tb1->jmp_next[n1];
448 /* now we can suppress tb(n) from the list */
449 *ptb = tb->jmp_next[n];
451 tb->jmp_next[n] = NULL;
455 /* reset the jump entry 'n' of a TB so that it is not chained to
456 another TB */
457 static inline void tb_reset_jump(TranslationBlock *tb, int n)
459 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
462 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
464 CPUState *env;
465 PageDesc *p;
466 unsigned int h, n1;
467 target_ulong phys_pc;
468 TranslationBlock *tb1, *tb2;
470 /* remove the TB from the hash list */
471 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
472 h = tb_phys_hash_func(phys_pc);
473 tb_remove(&tb_phys_hash[h], tb,
474 offsetof(TranslationBlock, phys_hash_next));
476 /* remove the TB from the page list */
477 if (tb->page_addr[0] != page_addr) {
478 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
479 tb_page_remove(&p->first_tb, tb);
480 invalidate_page_bitmap(p);
482 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
483 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
484 tb_page_remove(&p->first_tb, tb);
485 invalidate_page_bitmap(p);
488 tb_invalidated_flag = 1;
490 /* remove the TB from the hash list */
491 h = tb_jmp_cache_hash_func(tb->pc);
492 for(env = first_cpu; env != NULL; env = env->next_cpu) {
493 if (env->tb_jmp_cache[h] == tb)
494 env->tb_jmp_cache[h] = NULL;
497 /* suppress this TB from the two jump lists */
498 tb_jmp_remove(tb, 0);
499 tb_jmp_remove(tb, 1);
501 /* suppress any remaining jumps to this TB */
502 tb1 = tb->jmp_first;
503 for(;;) {
504 n1 = (long)tb1 & 3;
505 if (n1 == 2)
506 break;
507 tb1 = (TranslationBlock *)((long)tb1 & ~3);
508 tb2 = tb1->jmp_next[n1];
509 tb_reset_jump(tb1, n1);
510 tb1->jmp_next[n1] = NULL;
511 tb1 = tb2;
513 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
515 tb_phys_invalidate_count++;
518 static inline void set_bits(uint8_t *tab, int start, int len)
520 int end, mask, end1;
522 end = start + len;
523 tab += start >> 3;
524 mask = 0xff << (start & 7);
525 if ((start & ~7) == (end & ~7)) {
526 if (start < end) {
527 mask &= ~(0xff << (end & 7));
528 *tab |= mask;
530 } else {
531 *tab++ |= mask;
532 start = (start + 8) & ~7;
533 end1 = end & ~7;
534 while (start < end1) {
535 *tab++ = 0xff;
536 start += 8;
538 if (start < end) {
539 mask = ~(0xff << (end & 7));
540 *tab |= mask;
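/* Editor's note (worked example, not part of the original file):
 * set_bits() marks the half-open bit range [start, start + len) in a
 * byte bitmap.  For instance, set_bits(tab, 10, 6) touches only tab[1]
 * and ORs in 0xfc, i.e. bits 10..15; build_page_bitmap() below uses this
 * to record which bytes of a page are covered by translated code. */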
545 static void build_page_bitmap(PageDesc *p)
547 int n, tb_start, tb_end;
548 TranslationBlock *tb;
550 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
551 if (!p->code_bitmap)
552 return;
553 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
555 tb = p->first_tb;
556 while (tb != NULL) {
557 n = (long)tb & 3;
558 tb = (TranslationBlock *)((long)tb & ~3);
559 /* NOTE: this is subtle as a TB may span two physical pages */
560 if (n == 0) {
561 /* NOTE: tb_end may be after the end of the page, but
562 it is not a problem */
563 tb_start = tb->pc & ~TARGET_PAGE_MASK;
564 tb_end = tb_start + tb->size;
565 if (tb_end > TARGET_PAGE_SIZE)
566 tb_end = TARGET_PAGE_SIZE;
567 } else {
568 tb_start = 0;
569 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
571 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
572 tb = tb->page_next[n];
576 #ifdef TARGET_HAS_PRECISE_SMC
578 static void tb_gen_code(CPUState *env,
579 target_ulong pc, target_ulong cs_base, int flags,
580 int cflags)
582 TranslationBlock *tb;
583 uint8_t *tc_ptr;
584 target_ulong phys_pc, phys_page2, virt_page2;
585 int code_gen_size;
587 phys_pc = get_phys_addr_code(env, pc);
588 tb = tb_alloc(pc);
589 if (!tb) {
590 /* flush must be done */
591 tb_flush(env);
592 /* cannot fail at this point */
593 tb = tb_alloc(pc);
595 tc_ptr = code_gen_ptr;
596 tb->tc_ptr = tc_ptr;
597 tb->cs_base = cs_base;
598 tb->flags = flags;
599 tb->cflags = cflags;
600 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
601 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
603 /* check next page if needed */
604 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
605 phys_page2 = -1;
606 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
607 phys_page2 = get_phys_addr_code(env, virt_page2);
609 tb_link_phys(tb, phys_pc, phys_page2);
611 #endif
613 /* invalidate all TBs which intersect with the target physical page
614 starting in range [start, end). NOTE: start and end must refer to
615 the same physical page. 'is_cpu_write_access' should be true if called
616 from a real cpu write access: the virtual CPU will exit the current
617 TB if code is modified inside this TB. */
618 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
619 int is_cpu_write_access)
621 int n, current_tb_modified, current_tb_not_found, current_flags;
622 CPUState *env = cpu_single_env;
623 PageDesc *p;
624 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
625 target_ulong tb_start, tb_end;
626 target_ulong current_pc, current_cs_base;
628 p = page_find(start >> TARGET_PAGE_BITS);
629 if (!p)
630 return;
631 if (!p->code_bitmap &&
632 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
633 is_cpu_write_access) {
634 /* build code bitmap */
635 build_page_bitmap(p);
638 /* we remove all the TBs in the range [start, end) */
639 /* XXX: see if in some cases it could be faster to invalidate all the code */
640 current_tb_not_found = is_cpu_write_access;
641 current_tb_modified = 0;
642 current_tb = NULL; /* avoid warning */
643 current_pc = 0; /* avoid warning */
644 current_cs_base = 0; /* avoid warning */
645 current_flags = 0; /* avoid warning */
646 tb = p->first_tb;
647 while (tb != NULL) {
648 n = (long)tb & 3;
649 tb = (TranslationBlock *)((long)tb & ~3);
650 tb_next = tb->page_next[n];
651 /* NOTE: this is subtle as a TB may span two physical pages */
652 if (n == 0) {
653 /* NOTE: tb_end may be after the end of the page, but
654 it is not a problem */
655 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
656 tb_end = tb_start + tb->size;
657 } else {
658 tb_start = tb->page_addr[1];
659 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
661 if (!(tb_end <= start || tb_start >= end)) {
662 #ifdef TARGET_HAS_PRECISE_SMC
663 if (current_tb_not_found) {
664 current_tb_not_found = 0;
665 current_tb = NULL;
666 if (env->mem_write_pc) {
667 /* now we have a real cpu fault */
668 current_tb = tb_find_pc(env->mem_write_pc);
671 if (current_tb == tb &&
672 !(current_tb->cflags & CF_SINGLE_INSN)) {
673 /* If we are modifying the current TB, we must stop
674 its execution. We could be more precise by checking
675 that the modification is after the current PC, but it
676 would require a specialized function to partially
677 restore the CPU state */
679 current_tb_modified = 1;
680 cpu_restore_state(current_tb, env,
681 env->mem_write_pc, NULL);
682 #if defined(TARGET_I386)
683 current_flags = env->hflags;
684 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
685 current_cs_base = (target_ulong)env->segs[R_CS].base;
686 current_pc = current_cs_base + env->eip;
687 #else
688 #error unsupported CPU
689 #endif
691 #endif /* TARGET_HAS_PRECISE_SMC */
692 /* we need to do that to handle the case where a signal
693 occurs while doing tb_phys_invalidate() */
694 saved_tb = NULL;
695 if (env) {
696 saved_tb = env->current_tb;
697 env->current_tb = NULL;
699 tb_phys_invalidate(tb, -1);
700 if (env) {
701 env->current_tb = saved_tb;
702 if (env->interrupt_request && env->current_tb)
703 cpu_interrupt(env, env->interrupt_request);
706 tb = tb_next;
708 #if !defined(CONFIG_USER_ONLY)
709 /* if no code remaining, no need to continue to use slow writes */
710 if (!p->first_tb) {
711 invalidate_page_bitmap(p);
712 if (is_cpu_write_access) {
713 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
716 #endif
717 #ifdef TARGET_HAS_PRECISE_SMC
718 if (current_tb_modified) {
719 /* we generate a block containing just the instruction
720 modifying the memory. It will ensure that it cannot modify
721 itself */
722 env->current_tb = NULL;
723 tb_gen_code(env, current_pc, current_cs_base, current_flags,
724 CF_SINGLE_INSN);
725 cpu_resume_from_signal(env, NULL);
727 #endif
730 /* len must be <= 8 and start must be a multiple of len */
731 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
733 PageDesc *p;
734 int offset, b;
735 #if 0
736 if (1) {
737 if (loglevel) {
738 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
739 cpu_single_env->mem_write_vaddr, len,
740 cpu_single_env->eip,
741 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
744 #endif
745 p = page_find(start >> TARGET_PAGE_BITS);
746 if (!p)
747 return;
748 if (p->code_bitmap) {
749 offset = start & ~TARGET_PAGE_MASK;
750 b = p->code_bitmap[offset >> 3] >> (offset & 7);
751 if (b & ((1 << len) - 1))
752 goto do_invalidate;
753 } else {
754 do_invalidate:
755 tb_invalidate_phys_page_range(start, start + len, 1);
759 #if !defined(CONFIG_SOFTMMU)
760 static void tb_invalidate_phys_page(target_ulong addr,
761 unsigned long pc, void *puc)
763 int n, current_flags, current_tb_modified;
764 target_ulong current_pc, current_cs_base;
765 PageDesc *p;
766 TranslationBlock *tb, *current_tb;
767 #ifdef TARGET_HAS_PRECISE_SMC
768 CPUState *env = cpu_single_env;
769 #endif
771 addr &= TARGET_PAGE_MASK;
772 p = page_find(addr >> TARGET_PAGE_BITS);
773 if (!p)
774 return;
775 tb = p->first_tb;
776 current_tb_modified = 0;
777 current_tb = NULL;
778 current_pc = 0; /* avoid warning */
779 current_cs_base = 0; /* avoid warning */
780 current_flags = 0; /* avoid warning */
781 #ifdef TARGET_HAS_PRECISE_SMC
782 if (tb && pc != 0) {
783 current_tb = tb_find_pc(pc);
785 #endif
786 while (tb != NULL) {
787 n = (long)tb & 3;
788 tb = (TranslationBlock *)((long)tb & ~3);
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb == tb &&
791 !(current_tb->cflags & CF_SINGLE_INSN)) {
792 /* If we are modifying the current TB, we must stop
793 its execution. We could be more precise by checking
794 that the modification is after the current PC, but it
795 would require a specialized function to partially
796 restore the CPU state */
798 current_tb_modified = 1;
799 cpu_restore_state(current_tb, env, pc, puc);
800 #if defined(TARGET_I386)
801 current_flags = env->hflags;
802 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
803 current_cs_base = (target_ulong)env->segs[R_CS].base;
804 current_pc = current_cs_base + env->eip;
805 #else
806 #error unsupported CPU
807 #endif
809 #endif /* TARGET_HAS_PRECISE_SMC */
810 tb_phys_invalidate(tb, addr);
811 tb = tb->page_next[n];
813 p->first_tb = NULL;
814 #ifdef TARGET_HAS_PRECISE_SMC
815 if (current_tb_modified) {
816 /* we generate a block containing just the instruction
817 modifying the memory. It will ensure that it cannot modify
818 itself */
819 env->current_tb = NULL;
820 tb_gen_code(env, current_pc, current_cs_base, current_flags,
821 CF_SINGLE_INSN);
822 cpu_resume_from_signal(env, puc);
824 #endif
826 #endif
828 /* add the tb in the target page and protect it if necessary */
829 static inline void tb_alloc_page(TranslationBlock *tb,
830 unsigned int n, target_ulong page_addr)
832 PageDesc *p;
833 TranslationBlock *last_first_tb;
835 tb->page_addr[n] = page_addr;
836 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
837 tb->page_next[n] = p->first_tb;
838 last_first_tb = p->first_tb;
839 p->first_tb = (TranslationBlock *)((long)tb | n);
840 invalidate_page_bitmap(p);
842 #if defined(TARGET_HAS_SMC) || 1
844 #if defined(CONFIG_USER_ONLY)
845 if (p->flags & PAGE_WRITE) {
846 target_ulong addr;
847 PageDesc *p2;
848 int prot;
850 /* force the host page as non writable (writes will have a
851 page fault + mprotect overhead) */
852 page_addr &= qemu_host_page_mask;
853 prot = 0;
854 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
855 addr += TARGET_PAGE_SIZE) {
857 p2 = page_find (addr >> TARGET_PAGE_BITS);
858 if (!p2)
859 continue;
860 prot |= p2->flags;
861 p2->flags &= ~PAGE_WRITE;
862 page_get_flags(addr);
864 mprotect(g2h(page_addr), qemu_host_page_size,
865 (prot & PAGE_BITS) & ~PAGE_WRITE);
866 #ifdef DEBUG_TB_INVALIDATE
867 printf("protecting code page: 0x%08lx\n",
868 page_addr);
869 #endif
871 #else
872 /* if some code is already present, then the pages are already
873 protected. So we handle the case where only the first TB is
874 allocated in a physical page */
875 if (!last_first_tb) {
876 tlb_protect_code(page_addr);
878 #endif
880 #endif /* TARGET_HAS_SMC */
883 /* Allocate a new translation block. Flush the translation buffer if
884 too many translation blocks or too much generated code. */
885 TranslationBlock *tb_alloc(target_ulong pc)
887 TranslationBlock *tb;
889 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
890 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
891 return NULL;
892 tb = &tbs[nb_tbs++];
893 tb->pc = pc;
894 tb->cflags = 0;
895 return tb;
898 /* add a new TB and link it to the physical page tables. phys_page2 is
899 (-1) to indicate that only one page contains the TB. */
900 void tb_link_phys(TranslationBlock *tb,
901 target_ulong phys_pc, target_ulong phys_page2)
903 unsigned int h;
904 TranslationBlock **ptb;
906 /* add in the physical hash table */
907 h = tb_phys_hash_func(phys_pc);
908 ptb = &tb_phys_hash[h];
909 tb->phys_hash_next = *ptb;
910 *ptb = tb;
912 /* add in the page list */
913 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
914 if (phys_page2 != -1)
915 tb_alloc_page(tb, 1, phys_page2);
916 else
917 tb->page_addr[1] = -1;
919 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
920 tb->jmp_next[0] = NULL;
921 tb->jmp_next[1] = NULL;
922 #ifdef USE_CODE_COPY
923 tb->cflags &= ~CF_FP_USED;
924 if (tb->cflags & CF_TB_FP_USED)
925 tb->cflags |= CF_FP_USED;
926 #endif
928 /* init original jump addresses */
929 if (tb->tb_next_offset[0] != 0xffff)
930 tb_reset_jump(tb, 0);
931 if (tb->tb_next_offset[1] != 0xffff)
932 tb_reset_jump(tb, 1);
934 #ifdef DEBUG_TB_CHECK
935 tb_page_check();
936 #endif
939 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
940 tb[1].tc_ptr. Return NULL if not found */
941 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
943 int m_min, m_max, m;
944 unsigned long v;
945 TranslationBlock *tb;
947 if (nb_tbs <= 0)
948 return NULL;
949 if (tc_ptr < (unsigned long)code_gen_buffer ||
950 tc_ptr >= (unsigned long)code_gen_ptr)
951 return NULL;
952 /* binary search (cf Knuth) */
953 m_min = 0;
954 m_max = nb_tbs - 1;
955 while (m_min <= m_max) {
956 m = (m_min + m_max) >> 1;
957 tb = &tbs[m];
958 v = (unsigned long)tb->tc_ptr;
959 if (v == tc_ptr)
960 return tb;
961 else if (tc_ptr < v) {
962 m_max = m - 1;
963 } else {
964 m_min = m + 1;
967 return &tbs[m_max];
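/* Editor's note (usage note, not part of the original file): tb_find_pc()
 * takes a host code address (for example the faulting PC saved in
 * env->mem_write_pc) and maps it back to the TranslationBlock whose
 * generated code contains it; callers such as
 * tb_invalidate_phys_page_range() then pass that TB to cpu_restore_state()
 * to recover the guest CPU state at the faulting instruction. */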
970 static void tb_reset_jump_recursive(TranslationBlock *tb);
972 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
974 TranslationBlock *tb1, *tb_next, **ptb;
975 unsigned int n1;
977 tb1 = tb->jmp_next[n];
978 if (tb1 != NULL) {
979 /* find head of list */
980 for(;;) {
981 n1 = (long)tb1 & 3;
982 tb1 = (TranslationBlock *)((long)tb1 & ~3);
983 if (n1 == 2)
984 break;
985 tb1 = tb1->jmp_next[n1];
987 /* we are now sure that tb jumps to tb1 */
988 tb_next = tb1;
990 /* remove tb from the jmp_first list */
991 ptb = &tb_next->jmp_first;
992 for(;;) {
993 tb1 = *ptb;
994 n1 = (long)tb1 & 3;
995 tb1 = (TranslationBlock *)((long)tb1 & ~3);
996 if (n1 == n && tb1 == tb)
997 break;
998 ptb = &tb1->jmp_next[n1];
1000 *ptb = tb->jmp_next[n];
1001 tb->jmp_next[n] = NULL;
1003 /* suppress the jump to next tb in generated code */
1004 tb_reset_jump(tb, n);
1006 /* suppress jumps in the tb on which we could have jumped */
1007 tb_reset_jump_recursive(tb_next);
1011 static void tb_reset_jump_recursive(TranslationBlock *tb)
1013 tb_reset_jump_recursive2(tb, 0);
1014 tb_reset_jump_recursive2(tb, 1);
1017 #if defined(TARGET_HAS_ICE)
1018 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1020 target_ulong addr, pd;
1021 ram_addr_t ram_addr;
1022 PhysPageDesc *p;
1024 addr = cpu_get_phys_page_debug(env, pc);
1025 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1026 if (!p) {
1027 pd = IO_MEM_UNASSIGNED;
1028 } else {
1029 pd = p->phys_offset;
1031 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1032 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1034 #endif
1036 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1037 breakpoint is reached */
1038 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1040 #if defined(TARGET_HAS_ICE)
1041 int i;
1043 for(i = 0; i < env->nb_breakpoints; i++) {
1044 if (env->breakpoints[i] == pc)
1045 return 0;
1048 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1049 return -1;
1050 env->breakpoints[env->nb_breakpoints++] = pc;
1052 #ifdef USE_KVM
1053 if (kvm_allowed)
1054 kvm_update_debugger(env);
1055 #endif
1057 breakpoint_invalidate(env, pc);
1058 return 0;
1059 #else
1060 return -1;
1061 #endif
1064 /* remove a breakpoint */
1065 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1067 #if defined(TARGET_HAS_ICE)
1068 int i;
1069 for(i = 0; i < env->nb_breakpoints; i++) {
1070 if (env->breakpoints[i] == pc)
1071 goto found;
1073 return -1;
1074 found:
1075 env->nb_breakpoints--;
1076 if (i < env->nb_breakpoints)
1077 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1079 #ifdef USE_KVM
1080 if (kvm_allowed)
1081 kvm_update_debugger(env);
1082 #endif
1084 breakpoint_invalidate(env, pc);
1085 return 0;
1086 #else
1087 return -1;
1088 #endif
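/* Editor's note (illustrative usage, not part of the original file): a
 * debugger front end would typically call cpu_breakpoint_insert(env, pc)
 * to arm a breakpoint and cpu_breakpoint_remove(env, pc) to clear it;
 * both invalidate the translated code covering pc via
 * breakpoint_invalidate(), and the CPU loop then reports EXCP_DEBUG when
 * the breakpoint address is reached. */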
1091 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1092 CPU loop after each instruction */
1093 void cpu_single_step(CPUState *env, int enabled)
1095 #if defined(TARGET_HAS_ICE)
1096 if (env->singlestep_enabled != enabled) {
1097 env->singlestep_enabled = enabled;
1098 /* must flush all the translated code to avoid inconsistencies */
1099 /* XXX: only flush what is necessary */
1100 tb_flush(env);
1102 #ifdef USE_KVM
1103 if (kvm_allowed)
1104 kvm_update_debugger(env);
1105 #endif
1106 #endif
1109 /* enable or disable low levels log */
1110 void cpu_set_log(int log_flags)
1112 loglevel = log_flags;
1113 if (loglevel && !logfile) {
1114 logfile = fopen(logfilename, "w");
1115 if (!logfile) {
1116 perror(logfilename);
1117 _exit(1);
1119 #if !defined(CONFIG_SOFTMMU)
1120 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1122 static uint8_t logfile_buf[4096];
1123 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1125 #else
1126 setvbuf(logfile, NULL, _IOLBF, 0);
1127 #endif
1131 void cpu_set_log_filename(const char *filename)
1133 logfilename = strdup(filename);
1136 /* mask must never be zero, except for A20 change call */
1137 void cpu_interrupt(CPUState *env, int mask)
1139 TranslationBlock *tb;
1140 static int interrupt_lock;
1142 env->interrupt_request |= mask;
1143 /* if the cpu is currently executing code, we must unlink it and
1144 all the potentially executing TB */
1145 tb = env->current_tb;
1146 if (tb && !testandset(&interrupt_lock)) {
1147 env->current_tb = NULL;
1148 tb_reset_jump_recursive(tb);
1149 interrupt_lock = 0;
1153 void cpu_reset_interrupt(CPUState *env, int mask)
1155 env->interrupt_request &= ~mask;
1158 CPULogItem cpu_log_items[] = {
1159 { CPU_LOG_TB_OUT_ASM, "out_asm",
1160 "show generated host assembly code for each compiled TB" },
1161 { CPU_LOG_TB_IN_ASM, "in_asm",
1162 "show target assembly code for each compiled TB" },
1163 { CPU_LOG_TB_OP, "op",
1164 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1165 #ifdef TARGET_I386
1166 { CPU_LOG_TB_OP_OPT, "op_opt",
1167 "show micro ops after optimization for each compiled TB" },
1168 #endif
1169 { CPU_LOG_INT, "int",
1170 "show interrupts/exceptions in short format" },
1171 { CPU_LOG_EXEC, "exec",
1172 "show trace before each executed TB (lots of logs)" },
1173 { CPU_LOG_TB_CPU, "cpu",
1174 "show CPU state before bloc translation" },
1175 #ifdef TARGET_I386
1176 { CPU_LOG_PCALL, "pcall",
1177 "show protected mode far calls/returns/exceptions" },
1178 #endif
1179 #ifdef DEBUG_IOPORT
1180 { CPU_LOG_IOPORT, "ioport",
1181 "show all i/o ports accesses" },
1182 #endif
1183 { 0, NULL, NULL },
1186 static int cmp1(const char *s1, int n, const char *s2)
1188 if (strlen(s2) != n)
1189 return 0;
1190 return memcmp(s1, s2, n) == 0;
1193 /* takes a comma-separated list of log masks. Returns 0 on error. */
1194 int cpu_str_to_log_mask(const char *str)
1196 CPULogItem *item;
1197 int mask;
1198 const char *p, *p1;
1200 p = str;
1201 mask = 0;
1202 for(;;) {
1203 p1 = strchr(p, ',');
1204 if (!p1)
1205 p1 = p + strlen(p);
1206 if(cmp1(p,p1-p,"all")) {
1207 for(item = cpu_log_items; item->mask != 0; item++) {
1208 mask |= item->mask;
1210 } else {
1211 for(item = cpu_log_items; item->mask != 0; item++) {
1212 if (cmp1(p, p1 - p, item->name))
1213 goto found;
1215 return 0;
1217 found:
1218 mask |= item->mask;
1219 if (*p1 != ',')
1220 break;
1221 p = p1 + 1;
1223 return mask;
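/* Editor's note (worked example, not part of the original file): with the
 * cpu_log_items table above, cpu_str_to_log_mask("in_asm,cpu") returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every mask, and any
 * unrecognised name makes the whole call return 0. */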
1226 void cpu_abort(CPUState *env, const char *fmt, ...)
1228 va_list ap;
1230 va_start(ap, fmt);
1231 fprintf(stderr, "qemu: fatal: ");
1232 vfprintf(stderr, fmt, ap);
1233 fprintf(stderr, "\n");
1234 #ifdef TARGET_I386
1235 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1236 #else
1237 cpu_dump_state(env, stderr, fprintf, 0);
1238 #endif
1239 va_end(ap);
1240 abort();
1243 #if !defined(CONFIG_USER_ONLY)
1245 /* NOTE: if flush_global is true, also flush global entries (not
1246 implemented yet) */
1247 void tlb_flush(CPUState *env, int flush_global)
1249 int i;
1251 #if defined(DEBUG_TLB)
1252 printf("tlb_flush:\n");
1253 #endif
1254 /* must reset current TB so that interrupts cannot modify the
1255 links while we are modifying them */
1256 env->current_tb = NULL;
1258 for(i = 0; i < CPU_TLB_SIZE; i++) {
1259 env->tlb_table[0][i].addr_read = -1;
1260 env->tlb_table[0][i].addr_write = -1;
1261 env->tlb_table[0][i].addr_code = -1;
1262 env->tlb_table[1][i].addr_read = -1;
1263 env->tlb_table[1][i].addr_write = -1;
1264 env->tlb_table[1][i].addr_code = -1;
1267 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1269 #if !defined(CONFIG_SOFTMMU)
1270 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1271 #endif
1272 #ifdef USE_KQEMU
1273 if (env->kqemu_enabled) {
1274 kqemu_flush(env, flush_global);
1276 #endif
1277 tlb_flush_count++;
1280 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1282 if (addr == (tlb_entry->addr_read &
1283 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1284 addr == (tlb_entry->addr_write &
1285 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1286 addr == (tlb_entry->addr_code &
1287 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1288 tlb_entry->addr_read = -1;
1289 tlb_entry->addr_write = -1;
1290 tlb_entry->addr_code = -1;
1294 void tlb_flush_page(CPUState *env, target_ulong addr)
1296 int i;
1297 TranslationBlock *tb;
1299 #if defined(DEBUG_TLB)
1300 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1301 #endif
1302 /* must reset current TB so that interrupts cannot modify the
1303 links while we are modifying them */
1304 env->current_tb = NULL;
1306 addr &= TARGET_PAGE_MASK;
1307 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1308 tlb_flush_entry(&env->tlb_table[0][i], addr);
1309 tlb_flush_entry(&env->tlb_table[1][i], addr);
1311 /* Discard jump cache entries for any tb which might potentially
1312 overlap the flushed page. */
1313 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1314 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1316 i = tb_jmp_cache_hash_page(addr);
1317 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1319 #if !defined(CONFIG_SOFTMMU)
1320 if (addr < MMAP_AREA_END)
1321 munmap((void *)addr, TARGET_PAGE_SIZE);
1322 #endif
1323 #ifdef USE_KQEMU
1324 if (env->kqemu_enabled) {
1325 kqemu_flush_page(env, addr);
1327 #endif
1330 /* update the TLBs so that writes to code in the virtual page 'addr'
1331 can be detected */
1332 static void tlb_protect_code(ram_addr_t ram_addr)
1334 cpu_physical_memory_reset_dirty(ram_addr,
1335 ram_addr + TARGET_PAGE_SIZE,
1336 CODE_DIRTY_FLAG);
1339 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1340 tested for self modifying code */
1341 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1342 target_ulong vaddr)
1344 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1347 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1348 unsigned long start, unsigned long length)
1350 unsigned long addr;
1351 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1352 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1353 if ((addr - start) < length) {
1354 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1359 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1360 int dirty_flags)
1362 CPUState *env;
1363 unsigned long length, start1;
1364 int i, mask, len;
1365 uint8_t *p;
1367 start &= TARGET_PAGE_MASK;
1368 end = TARGET_PAGE_ALIGN(end);
1370 length = end - start;
1371 if (length == 0)
1372 return;
1373 len = length >> TARGET_PAGE_BITS;
1374 #ifdef USE_KQEMU
1375 /* XXX: should not depend on cpu context */
1376 env = first_cpu;
1377 if (env->kqemu_enabled) {
1378 ram_addr_t addr;
1379 addr = start;
1380 for(i = 0; i < len; i++) {
1381 kqemu_set_notdirty(env, addr);
1382 addr += TARGET_PAGE_SIZE;
1385 #endif
1386 mask = ~dirty_flags;
1387 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1388 for(i = 0; i < len; i++)
1389 p[i] &= mask;
1391 /* we modify the TLB cache so that the dirty bit will be set again
1392 when accessing the range */
1393 start1 = start + (unsigned long)phys_ram_base;
1394 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1395 for(i = 0; i < CPU_TLB_SIZE; i++)
1396 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1397 for(i = 0; i < CPU_TLB_SIZE; i++)
1398 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1401 #if !defined(CONFIG_SOFTMMU)
1402 /* XXX: this is expensive */
1404 VirtPageDesc *p;
1405 int j;
1406 target_ulong addr;
1408 for(i = 0; i < L1_SIZE; i++) {
1409 p = l1_virt_map[i];
1410 if (p) {
1411 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1412 for(j = 0; j < L2_SIZE; j++) {
1413 if (p->valid_tag == virt_valid_tag &&
1414 p->phys_addr >= start && p->phys_addr < end &&
1415 (p->prot & PROT_WRITE)) {
1416 if (addr < MMAP_AREA_END) {
1417 mprotect((void *)addr, TARGET_PAGE_SIZE,
1418 p->prot & ~PROT_WRITE);
1421 addr += TARGET_PAGE_SIZE;
1422 p++;
1427 #endif
1430 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1432 ram_addr_t ram_addr;
1434 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1435 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1436 tlb_entry->addend - (unsigned long)phys_ram_base;
1437 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1438 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1443 /* update the TLB according to the current state of the dirty bits */
1444 void cpu_tlb_update_dirty(CPUState *env)
1446 int i;
1447 for(i = 0; i < CPU_TLB_SIZE; i++)
1448 tlb_update_dirty(&env->tlb_table[0][i]);
1449 for(i = 0; i < CPU_TLB_SIZE; i++)
1450 tlb_update_dirty(&env->tlb_table[1][i]);
1453 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1454 unsigned long start)
1456 unsigned long addr;
1457 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1458 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1459 if (addr == start) {
1460 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1465 /* update the TLB corresponding to virtual page vaddr and phys addr
1466 addr so that it is no longer dirty */
1467 static inline void tlb_set_dirty(CPUState *env,
1468 unsigned long addr, target_ulong vaddr)
1470 int i;
1472 addr &= TARGET_PAGE_MASK;
1473 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1474 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1475 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1478 /* add a new TLB entry. At most one entry for a given virtual address
1479 is permitted. Return 0 if OK or 2 if the page could not be mapped
1480 (can only happen in non SOFTMMU mode for I/O pages or pages
1481 conflicting with the host address space). */
1482 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1483 target_phys_addr_t paddr, int prot,
1484 int is_user, int is_softmmu)
1486 PhysPageDesc *p;
1487 unsigned long pd;
1488 unsigned int index;
1489 target_ulong address;
1490 target_phys_addr_t addend;
1491 int ret;
1492 CPUTLBEntry *te;
1494 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1495 if (!p) {
1496 pd = IO_MEM_UNASSIGNED;
1497 } else {
1498 pd = p->phys_offset;
1500 #if defined(DEBUG_TLB)
1501 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1502 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1503 #endif
1505 ret = 0;
1506 #if !defined(CONFIG_SOFTMMU)
1507 if (is_softmmu)
1508 #endif
1510 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1511 /* IO memory case */
1512 address = vaddr | pd;
1513 addend = paddr;
1514 } else {
1515 /* standard memory */
1516 address = vaddr;
1517 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1520 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1521 addend -= vaddr;
1522 te = &env->tlb_table[is_user][index];
1523 te->addend = addend;
1524 if (prot & PAGE_READ) {
1525 te->addr_read = address;
1526 } else {
1527 te->addr_read = -1;
1529 if (prot & PAGE_EXEC) {
1530 te->addr_code = address;
1531 } else {
1532 te->addr_code = -1;
1534 if (prot & PAGE_WRITE) {
1535 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1536 (pd & IO_MEM_ROMD)) {
1537 /* write access calls the I/O callback */
1538 te->addr_write = vaddr |
1539 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1540 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1541 !cpu_physical_memory_is_dirty(pd)) {
1542 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1543 } else {
1544 te->addr_write = address;
1546 } else {
1547 te->addr_write = -1;
1550 #if !defined(CONFIG_SOFTMMU)
1551 else {
1552 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1553 /* IO access: no mapping is done as it will be handled by the
1554 soft MMU */
1555 if (!(env->hflags & HF_SOFTMMU_MASK))
1556 ret = 2;
1557 } else {
1558 void *map_addr;
1560 if (vaddr >= MMAP_AREA_END) {
1561 ret = 2;
1562 } else {
1563 if (prot & PROT_WRITE) {
1564 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1565 #if defined(TARGET_HAS_SMC) || 1
1566 first_tb ||
1567 #endif
1568 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1569 !cpu_physical_memory_is_dirty(pd))) {
1570 /* ROM: we do as if code was inside */
1571 /* if code is present, we only map as read only and save the
1572 original mapping */
1573 VirtPageDesc *vp;
1575 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1576 vp->phys_addr = pd;
1577 vp->prot = prot;
1578 vp->valid_tag = virt_valid_tag;
1579 prot &= ~PAGE_WRITE;
1582 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1583 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1584 if (map_addr == MAP_FAILED) {
1585 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1586 paddr, vaddr);
1591 #endif
1592 return ret;
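/* Editor's note (clarification, not part of the original file): for an
 * ordinary RAM page the entry set up above stores
 * addend = phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr, so the
 * softmmu fast path can turn a guest virtual address into a host pointer
 * with a single addition (vaddr + te->addend) once the tag comparison on
 * addr_read/addr_write/addr_code succeeds. */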
1595 /* called from signal handler: invalidate the code and unprotect the
1596 page. Return TRUE if the fault was successfully handled. */
1597 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1599 #if !defined(CONFIG_SOFTMMU)
1600 VirtPageDesc *vp;
1602 #if defined(DEBUG_TLB)
1603 printf("page_unprotect: addr=0x%08x\n", addr);
1604 #endif
1605 addr &= TARGET_PAGE_MASK;
1607 /* if it is not mapped, no need to worry here */
1608 if (addr >= MMAP_AREA_END)
1609 return 0;
1610 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1611 if (!vp)
1612 return 0;
1613 /* NOTE: in this case, validate_tag is _not_ tested as it
1614 validates only the code TLB */
1615 if (vp->valid_tag != virt_valid_tag)
1616 return 0;
1617 if (!(vp->prot & PAGE_WRITE))
1618 return 0;
1619 #if defined(DEBUG_TLB)
1620 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1621 addr, vp->phys_addr, vp->prot);
1622 #endif
1623 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1624 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1625 (unsigned long)addr, vp->prot);
1626 /* set the dirty bit */
1627 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1628 /* flush the code inside */
1629 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1630 return 1;
1631 #else
1632 return 0;
1633 #endif
1636 #else
1638 void tlb_flush(CPUState *env, int flush_global)
1642 void tlb_flush_page(CPUState *env, target_ulong addr)
1646 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1647 target_phys_addr_t paddr, int prot,
1648 int is_user, int is_softmmu)
1650 return 0;
1653 /* dump memory mappings */
1654 void page_dump(FILE *f)
1656 unsigned long start, end;
1657 int i, j, prot, prot1;
1658 PageDesc *p;
1660 fprintf(f, "%-8s %-8s %-8s %s\n",
1661 "start", "end", "size", "prot");
1662 start = -1;
1663 end = -1;
1664 prot = 0;
1665 for(i = 0; i <= L1_SIZE; i++) {
1666 if (i < L1_SIZE)
1667 p = l1_map[i];
1668 else
1669 p = NULL;
1670 for(j = 0;j < L2_SIZE; j++) {
1671 if (!p)
1672 prot1 = 0;
1673 else
1674 prot1 = p[j].flags;
1675 if (prot1 != prot) {
1676 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1677 if (start != -1) {
1678 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1679 start, end, end - start,
1680 prot & PAGE_READ ? 'r' : '-',
1681 prot & PAGE_WRITE ? 'w' : '-',
1682 prot & PAGE_EXEC ? 'x' : '-');
1684 if (prot1 != 0)
1685 start = end;
1686 else
1687 start = -1;
1688 prot = prot1;
1690 if (!p)
1691 break;
1696 int page_get_flags(target_ulong address)
1698 PageDesc *p;
1700 p = page_find(address >> TARGET_PAGE_BITS);
1701 if (!p)
1702 return 0;
1703 return p->flags;
1706 /* modify the flags of a page and invalidate the code if
1707 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1708 depending on PAGE_WRITE */
1709 void page_set_flags(target_ulong start, target_ulong end, int flags)
1711 PageDesc *p;
1712 target_ulong addr;
1714 start = start & TARGET_PAGE_MASK;
1715 end = TARGET_PAGE_ALIGN(end);
1716 if (flags & PAGE_WRITE)
1717 flags |= PAGE_WRITE_ORG;
1718 spin_lock(&tb_lock);
1719 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1720 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1721 /* if the write protection is set, then we invalidate the code
1722 inside */
1723 if (!(p->flags & PAGE_WRITE) &&
1724 (flags & PAGE_WRITE) &&
1725 p->first_tb) {
1726 tb_invalidate_phys_page(addr, 0, NULL);
1728 p->flags = flags;
1730 spin_unlock(&tb_lock);
1733 /* called from signal handler: invalidate the code and unprotect the
1734 page. Return TRUE if the fault was successfully handled. */
1735 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1737 unsigned int page_index, prot, pindex;
1738 PageDesc *p, *p1;
1739 target_ulong host_start, host_end, addr;
1741 host_start = address & qemu_host_page_mask;
1742 page_index = host_start >> TARGET_PAGE_BITS;
1743 p1 = page_find(page_index);
1744 if (!p1)
1745 return 0;
1746 host_end = host_start + qemu_host_page_size;
1747 p = p1;
1748 prot = 0;
1749 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1750 prot |= p->flags;
1751 p++;
1753 /* if the page was really writable, then we change its
1754 protection back to writable */
1755 if (prot & PAGE_WRITE_ORG) {
1756 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1757 if (!(p1[pindex].flags & PAGE_WRITE)) {
1758 mprotect((void *)g2h(host_start), qemu_host_page_size,
1759 (prot & PAGE_BITS) | PAGE_WRITE);
1760 p1[pindex].flags |= PAGE_WRITE;
1761 /* and since the content will be modified, we must invalidate
1762 the corresponding translated code. */
1763 tb_invalidate_phys_page(address, pc, puc);
1764 #ifdef DEBUG_TB_CHECK
1765 tb_invalidate_check(address);
1766 #endif
1767 return 1;
1770 return 0;
1773 /* call this function when system calls directly modify a memory area */
1774 /* ??? This should be redundant now we have lock_user. */
1775 void page_unprotect_range(target_ulong data, target_ulong data_size)
1777 target_ulong start, end, addr;
1779 start = data;
1780 end = start + data_size;
1781 start &= TARGET_PAGE_MASK;
1782 end = TARGET_PAGE_ALIGN(end);
1783 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1784 page_unprotect(addr, 0, NULL);
1788 static inline void tlb_set_dirty(CPUState *env,
1789 unsigned long addr, target_ulong vaddr)
1792 #endif /* defined(CONFIG_USER_ONLY) */
1794 /* register physical memory. 'size' must be a multiple of the target
1795 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1796 io memory page */
1797 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1798 unsigned long size,
1799 unsigned long phys_offset)
1801 target_phys_addr_t addr, end_addr;
1802 PhysPageDesc *p;
1803 CPUState *env;
1805 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1806 end_addr = start_addr + size;
1807 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1808 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1809 p->phys_offset = phys_offset;
1810 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1811 (phys_offset & IO_MEM_ROMD))
1812 phys_offset += TARGET_PAGE_SIZE;
1815 /* since each CPU stores ram addresses in its TLB cache, we must
1816 reset the modified entries */
1817 /* XXX: slow ! */
1818 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1819 tlb_flush(env, 1);
1823 /* XXX: temporary until new memory mapping API */
1824 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1826 PhysPageDesc *p;
1828 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1829 if (!p)
1830 return IO_MEM_UNASSIGNED;
1831 return p->phys_offset;
1834 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1836 #ifdef DEBUG_UNASSIGNED
1837 printf("Unassigned mem read 0x%08x\n", (int)addr);
1838 #endif
1839 return 0;
1842 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1844 #ifdef DEBUG_UNASSIGNED
1845 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1846 #endif
1849 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1850 unassigned_mem_readb,
1851 unassigned_mem_readb,
1852 unassigned_mem_readb,
1855 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1856 unassigned_mem_writeb,
1857 unassigned_mem_writeb,
1858 unassigned_mem_writeb,
1861 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1863 unsigned long ram_addr;
1864 int dirty_flags;
1865 ram_addr = addr - (unsigned long)phys_ram_base;
1866 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1867 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1868 #if !defined(CONFIG_USER_ONLY)
1869 tb_invalidate_phys_page_fast(ram_addr, 1);
1870 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1871 #endif
1873 stb_p((uint8_t *)(long)addr, val);
1874 #ifdef USE_KQEMU
1875 if (cpu_single_env->kqemu_enabled &&
1876 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1877 kqemu_modify_page(cpu_single_env, ram_addr);
1878 #endif
1879 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1880 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1881 /* we remove the notdirty callback only if the code has been
1882 flushed */
1883 if (dirty_flags == 0xff)
1884 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1887 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1889 unsigned long ram_addr;
1890 int dirty_flags;
1891 ram_addr = addr - (unsigned long)phys_ram_base;
1892 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1893 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1894 #if !defined(CONFIG_USER_ONLY)
1895 tb_invalidate_phys_page_fast(ram_addr, 2);
1896 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1897 #endif
1899 stw_p((uint8_t *)(long)addr, val);
1900 #ifdef USE_KQEMU
1901 if (cpu_single_env->kqemu_enabled &&
1902 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1903 kqemu_modify_page(cpu_single_env, ram_addr);
1904 #endif
1905 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1906 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1907 /* we remove the notdirty callback only if the code has been
1908 flushed */
1909 if (dirty_flags == 0xff)
1910 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1913 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1915 unsigned long ram_addr;
1916 int dirty_flags;
1917 ram_addr = addr - (unsigned long)phys_ram_base;
1918 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1919 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1920 #if !defined(CONFIG_USER_ONLY)
1921 tb_invalidate_phys_page_fast(ram_addr, 4);
1922 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1923 #endif
1925 stl_p((uint8_t *)(long)addr, val);
1926 #ifdef USE_KQEMU
1927 if (cpu_single_env->kqemu_enabled &&
1928 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1929 kqemu_modify_page(cpu_single_env, ram_addr);
1930 #endif
1931 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1932 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1933 /* we remove the notdirty callback only if the code has been
1934 flushed */
1935 if (dirty_flags == 0xff)
1936 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1939 static CPUReadMemoryFunc *error_mem_read[3] = {
1940 NULL, /* never used */
1941 NULL, /* never used */
1942 NULL, /* never used */
1945 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1946 notdirty_mem_writeb,
1947 notdirty_mem_writew,
1948 notdirty_mem_writel,
1951 static void io_mem_init(void)
1953 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1954 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1955 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1956 io_mem_nb = 5;
1958 /* alloc dirty bits array */
1959 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1960 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1963 /* mem_read and mem_write are arrays of functions containing the
1964 function to access byte (index 0), word (index 1) and dword (index
1965 2). All functions must be supplied. If io_index is non-zero, the
1966 corresponding io zone is modified. If it is zero, a new io zone is
1967 allocated. The return value can be used with
1968 cpu_register_physical_memory(). (-1) is returned on error. */
1969 int cpu_register_io_memory(int io_index,
1970 CPUReadMemoryFunc **mem_read,
1971 CPUWriteMemoryFunc **mem_write,
1972 void *opaque)
1974 int i;
1976 if (io_index <= 0) {
1977 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1978 return -1;
1979 io_index = io_mem_nb++;
1980 } else {
1981 if (io_index >= IO_MEM_NB_ENTRIES)
1982 return -1;
1985 for(i = 0;i < 3; i++) {
1986 io_mem_read[io_index][i] = mem_read[i];
1987 io_mem_write[io_index][i] = mem_write[i];
1989 io_mem_opaque[io_index] = opaque;
1990 return io_index << IO_MEM_SHIFT;
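/* Editor's note (illustrative sketch, not part of the original file): a
 * device model would register an MMIO region roughly as follows.  The
 * my_dev_* handler names and the 4 KB region size are hypothetical. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,      /* byte, word, dword */
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base, void *opaque)
{
    /* io_index 0 asks for a new io zone to be allocated */
    int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, opaque);
    /* the returned value is then used as the phys_offset of the region */
    cpu_register_physical_memory(base, 0x1000, io);
}
#endif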
1993 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1995 return io_mem_write[io_index >> IO_MEM_SHIFT];
1998 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2000 return io_mem_read[io_index >> IO_MEM_SHIFT];
2003 /* physical memory access (slow version, mainly for debug) */
2004 #if defined(CONFIG_USER_ONLY)
2005 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2006 int len, int is_write)
2008 int l, flags;
2009 target_ulong page;
2010 void * p;
2012 while (len > 0) {
2013 page = addr & TARGET_PAGE_MASK;
2014 l = (page + TARGET_PAGE_SIZE) - addr;
2015 if (l > len)
2016 l = len;
2017 flags = page_get_flags(page);
2018 if (!(flags & PAGE_VALID))
2019 return;
2020 if (is_write) {
2021 if (!(flags & PAGE_WRITE))
2022 return;
2023 p = lock_user(addr, len, 0);
2024 memcpy(p, buf, len);
2025 unlock_user(p, addr, len);
2026 } else {
2027 if (!(flags & PAGE_READ))
2028 return;
2029 p = lock_user(addr, len, 1);
2030 memcpy(buf, p, len);
2031 unlock_user(p, addr, 0);
2033 len -= l;
2034 buf += l;
2035 addr += l;
2039 #else
2040 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2041 int len, int is_write)
2043 int l, io_index;
2044 uint8_t *ptr;
2045 uint32_t val;
2046 target_phys_addr_t page;
2047 unsigned long pd;
2048 PhysPageDesc *p;
2050 while (len > 0) {
2051 page = addr & TARGET_PAGE_MASK;
2052 l = (page + TARGET_PAGE_SIZE) - addr;
2053 if (l > len)
2054 l = len;
2055 p = phys_page_find(page >> TARGET_PAGE_BITS);
2056 if (!p) {
2057 pd = IO_MEM_UNASSIGNED;
2058 } else {
2059 pd = p->phys_offset;
2062 if (is_write) {
2063 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2064 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2065 /* XXX: could force cpu_single_env to NULL to avoid
2066 potential bugs */
2067 if (l >= 4 && ((addr & 3) == 0)) {
2068 /* 32 bit write access */
2069 val = ldl_p(buf);
2070 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2071 l = 4;
2072 } else if (l >= 2 && ((addr & 1) == 0)) {
2073 /* 16 bit write access */
2074 val = lduw_p(buf);
2075 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2076 l = 2;
2077 } else {
2078 /* 8 bit write access */
2079 val = ldub_p(buf);
2080 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2081 l = 1;
2082 }
2083 } else {
2084 unsigned long addr1;
2085 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2086 /* RAM case */
2087 ptr = phys_ram_base + addr1;
2088 memcpy(ptr, buf, l);
2089 if (!cpu_physical_memory_is_dirty(addr1)) {
2090 /* invalidate code */
2091 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2092 /* set dirty bit */
2093 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2094 (0xff & ~CODE_DIRTY_FLAG);
2095 }
2096 }
2097 } else {
2098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2099 !(pd & IO_MEM_ROMD)) {
2100 /* I/O case */
2101 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2102 if (l >= 4 && ((addr & 3) == 0)) {
2103 /* 32 bit read access */
2104 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2105 stl_p(buf, val);
2106 l = 4;
2107 } else if (l >= 2 && ((addr & 1) == 0)) {
2108 /* 16 bit read access */
2109 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2110 stw_p(buf, val);
2111 l = 2;
2112 } else {
2113 /* 8 bit read access */
2114 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2115 stb_p(buf, val);
2116 l = 1;
2117 }
2118 } else {
2119 /* RAM case */
2120 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2121 (addr & ~TARGET_PAGE_MASK);
2122 memcpy(buf, ptr, l);
2123 }
2124 }
2125 len -= l;
2126 buf += l;
2127 addr += l;
2128 }
2129 }
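/* Editorial example (hedged sketch, not part of the original file): copying
   a small buffer to and from guest-physical memory with
   cpu_physical_memory_rw(). The 0x1000 address and the table buffer are
   hypothetical; the last argument selects the direction (1 = write to the
   guest, 0 = read from it). */
#if 0
static void copy_table_example(void)
{
    uint8_t table[64];

    memset(table, 0, sizeof(table));
    cpu_physical_memory_rw(0x1000, table, sizeof(table), 1); /* write */
    cpu_physical_memory_rw(0x1000, table, sizeof(table), 0); /* read back */
}
#endif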
2131 /* used for ROM loading: can write to both RAM and ROM */
2132 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2133 const uint8_t *buf, int len)
2134 {
2135 int l;
2136 uint8_t *ptr;
2137 target_phys_addr_t page;
2138 unsigned long pd;
2139 PhysPageDesc *p;
2141 while (len > 0) {
2142 page = addr & TARGET_PAGE_MASK;
2143 l = (page + TARGET_PAGE_SIZE) - addr;
2144 if (l > len)
2145 l = len;
2146 p = phys_page_find(page >> TARGET_PAGE_BITS);
2147 if (!p) {
2148 pd = IO_MEM_UNASSIGNED;
2149 } else {
2150 pd = p->phys_offset;
2151 }
2153 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2154 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2155 !(pd & IO_MEM_ROMD)) {
2156 /* do nothing */
2157 } else {
2158 unsigned long addr1;
2159 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2160 /* ROM/RAM case */
2161 ptr = phys_ram_base + addr1;
2162 memcpy(ptr, buf, l);
2163 }
2164 len -= l;
2165 buf += l;
2166 addr += l;
2167 }
2168 }
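/* Editorial example (hedged sketch, not part of the original file): a
   firmware loader could use cpu_physical_memory_write_rom() to copy an image
   into a region registered as ROM, where a plain cpu_physical_memory_rw()
   write would never reach the backing storage. The load address 0xfffe0000
   and the image buffer are hypothetical. */
#if 0
static void load_firmware_example(const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(0xfffe0000, image, size);
}
#endif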
2171 /* warning: addr must be aligned */
2172 uint32_t ldl_phys(target_phys_addr_t addr)
2173 {
2174 int io_index;
2175 uint8_t *ptr;
2176 uint32_t val;
2177 unsigned long pd;
2178 PhysPageDesc *p;
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2181 if (!p) {
2182 pd = IO_MEM_UNASSIGNED;
2183 } else {
2184 pd = p->phys_offset;
2185 }
2187 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2188 !(pd & IO_MEM_ROMD)) {
2189 /* I/O case */
2190 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2191 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2192 } else {
2193 /* RAM case */
2194 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2195 (addr & ~TARGET_PAGE_MASK);
2196 val = ldl_p(ptr);
2197 }
2198 return val;
2199 }
2201 /* warning: addr must be aligned */
2202 uint64_t ldq_phys(target_phys_addr_t addr)
2203 {
2204 int io_index;
2205 uint8_t *ptr;
2206 uint64_t val;
2207 unsigned long pd;
2208 PhysPageDesc *p;
2210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2211 if (!p) {
2212 pd = IO_MEM_UNASSIGNED;
2213 } else {
2214 pd = p->phys_offset;
2215 }
2217 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2218 !(pd & IO_MEM_ROMD)) {
2219 /* I/O case */
2220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2221 #ifdef TARGET_WORDS_BIGENDIAN
2222 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2223 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2224 #else
2225 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2226 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2227 #endif
2228 } else {
2229 /* RAM case */
2230 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2231 (addr & ~TARGET_PAGE_MASK);
2232 val = ldq_p(ptr);
2233 }
2234 return val;
2235 }
2237 /* XXX: optimize */
2238 uint32_t ldub_phys(target_phys_addr_t addr)
2239 {
2240 uint8_t val;
2241 cpu_physical_memory_read(addr, &val, 1);
2242 return val;
2243 }
2245 /* XXX: optimize */
2246 uint32_t lduw_phys(target_phys_addr_t addr)
2247 {
2248 uint16_t val;
2249 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2250 return tswap16(val);
2251 }
2253 /* warning: addr must be aligned. The RAM page is not marked as dirty
2254 and the code inside it is not invalidated. This is useful when the dirty
2255 bits are used to track modified PTEs */
2256 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2257 {
2258 int io_index;
2259 uint8_t *ptr;
2260 unsigned long pd;
2261 PhysPageDesc *p;
2263 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2264 if (!p) {
2265 pd = IO_MEM_UNASSIGNED;
2266 } else {
2267 pd = p->phys_offset;
2268 }
2270 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2272 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2273 } else {
2274 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2275 (addr & ~TARGET_PAGE_MASK);
2276 stl_p(ptr, val);
2277 }
2278 }
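/* Editorial example (hedged sketch, not part of the original file): a guest
   MMU walk might set the accessed bit of a page table entry with
   stl_phys_notdirty() so that updating the PTE neither marks the page dirty
   nor invalidates translated code in it. pte_addr and PG_ACCESSED_MASK stand
   in for target-specific definitions. */
#if 0
static void set_pte_accessed_example(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PG_ACCESSED_MASK))
        stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
}
#endif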
2280 /* warning: addr must be aligned */
2281 void stl_phys(target_phys_addr_t addr, uint32_t val)
2282 {
2283 int io_index;
2284 uint8_t *ptr;
2285 unsigned long pd;
2286 PhysPageDesc *p;
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2289 if (!p) {
2290 pd = IO_MEM_UNASSIGNED;
2291 } else {
2292 pd = p->phys_offset;
2293 }
2295 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2298 } else {
2299 unsigned long addr1;
2300 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2301 /* RAM case */
2302 ptr = phys_ram_base + addr1;
2303 stl_p(ptr, val);
2304 if (!cpu_physical_memory_is_dirty(addr1)) {
2305 /* invalidate code */
2306 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2307 /* set dirty bit */
2308 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2309 (0xff & ~CODE_DIRTY_FLAG);
2310 }
2311 }
2312 }
2314 /* XXX: optimize */
2315 void stb_phys(target_phys_addr_t addr, uint32_t val)
2316 {
2317 uint8_t v = val;
2318 cpu_physical_memory_write(addr, &v, 1);
2319 }
2321 /* XXX: optimize */
2322 void stw_phys(target_phys_addr_t addr, uint32_t val)
2323 {
2324 uint16_t v = tswap16(val);
2325 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2326 }
2328 /* XXX: optimize */
2329 void stq_phys(target_phys_addr_t addr, uint64_t val)
2330 {
2331 val = tswap64(val);
2332 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2333 }
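/* Editorial example (hedged sketch, not part of the original file): the
   ld*_phys/st*_phys helpers above give typed, byte-order-aware access to
   guest-physical memory. The descriptor address passed in is hypothetical. */
#if 0
static void poke_descriptor_example(target_phys_addr_t desc)
{
    uint16_t flags;

    flags = lduw_phys(desc);                /* 16 bit load */
    stw_phys(desc, flags | 1);              /* set a bit, store it back */
    stq_phys(desc + 8, ldq_phys(desc + 8)); /* 64 bit round trip */
}
#endif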
2335 #endif
2337 /* virtual memory access for debug */
2338 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2339 uint8_t *buf, int len, int is_write)
2340 {
2341 int l;
2342 target_ulong page, phys_addr;
2344 while (len > 0) {
2345 page = addr & TARGET_PAGE_MASK;
2346 phys_addr = cpu_get_phys_page_debug(env, page);
2347 /* if no physical page mapped, return an error */
2348 if (phys_addr == -1)
2349 return -1;
2350 l = (page + TARGET_PAGE_SIZE) - addr;
2351 if (l > len)
2352 l = len;
2353 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2354 buf, l, is_write);
2355 len -= l;
2356 buf += l;
2357 addr += l;
2358 }
2359 return 0;
2360 }
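/* Editorial example (hedged sketch, not part of the original file): a
   debugger stub could use cpu_memory_rw_debug() to read guest-virtual memory
   through the target page tables; the call fails with -1 as soon as an
   unmapped page is hit. The 0x400000 address is hypothetical. */
#if 0
static int peek_guest_example(CPUState *env, uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, 0x400000, out, len, 0 /* read */);
}
#endif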
2362 void dump_exec_info(FILE *f,
2363 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2364 {
2365 int i, target_code_size, max_target_code_size;
2366 int direct_jmp_count, direct_jmp2_count, cross_page;
2367 TranslationBlock *tb;
2369 target_code_size = 0;
2370 max_target_code_size = 0;
2371 cross_page = 0;
2372 direct_jmp_count = 0;
2373 direct_jmp2_count = 0;
2374 for(i = 0; i < nb_tbs; i++) {
2375 tb = &tbs[i];
2376 target_code_size += tb->size;
2377 if (tb->size > max_target_code_size)
2378 max_target_code_size = tb->size;
2379 if (tb->page_addr[1] != -1)
2380 cross_page++;
2381 if (tb->tb_next_offset[0] != 0xffff) {
2382 direct_jmp_count++;
2383 if (tb->tb_next_offset[1] != 0xffff) {
2384 direct_jmp2_count++;
2385 }
2386 }
2387 }
2388 /* XXX: avoid using doubles ? */
2389 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2390 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2391 nb_tbs ? target_code_size / nb_tbs : 0,
2392 max_target_code_size);
2393 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2394 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2395 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2396 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2397 cross_page,
2398 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2399 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2400 direct_jmp_count,
2401 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2402 direct_jmp2_count,
2403 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2404 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2405 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2406 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2407 }
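/* Editorial example (hedged sketch, not part of the original file):
   dump_exec_info() accepts any fprintf-like callback, so the translation
   statistics can be sent to a plain stdio stream as easily as to the
   monitor. */
#if 0
static void print_tb_stats_example(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif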
2409 #if !defined(CONFIG_USER_ONLY)
2411 #define MMUSUFFIX _cmmu
2412 #define GETPC() NULL
2413 #define env cpu_single_env
2414 #define SOFTMMU_CODE_ACCESS
2416 #define SHIFT 0
2417 #include "softmmu_template.h"
2419 #define SHIFT 1
2420 #include "softmmu_template.h"
2422 #define SHIFT 2
2423 #include "softmmu_template.h"
2425 #define SHIFT 3
2426 #include "softmmu_template.h"
2428 #undef env
2430 #endif