[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 //#define DEBUG_TLB_CHECK
49 #if !defined(CONFIG_USER_ONLY)
50 /* TB consistency checks only implemented for usermode emulation. */
51 #undef DEBUG_TB_CHECK
52 #endif
54 /* threshold to flush the translated code buffer */
55 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57 #define SMC_BITMAP_USE_THRESHOLD 10
59 #define MMAP_AREA_START 0x00000000
60 #define MMAP_AREA_END 0xa8000000
62 #if defined(TARGET_SPARC64)
63 #define TARGET_PHYS_ADDR_SPACE_BITS 41
64 #elif defined(TARGET_PPC64)
65 #define TARGET_PHYS_ADDR_SPACE_BITS 42
66 #else
67 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
68 #define TARGET_PHYS_ADDR_SPACE_BITS 32
69 #endif
71 #ifdef USE_KVM
72 extern int kvm_allowed;
73 #endif
75 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
76 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
77 int nb_tbs;
78 /* any access to the tbs or the page table must use this lock */
79 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
81 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
82 uint8_t *code_gen_ptr;
84 int phys_ram_size;
85 int phys_ram_fd;
86 uint8_t *phys_ram_base;
87 uint8_t *phys_ram_dirty;
88 uint8_t *bios_mem;
90 CPUState *first_cpu;
91 /* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
93 CPUState *cpu_single_env;
95 typedef struct PageDesc {
96 /* list of TBs intersecting this ram page */
97 TranslationBlock *first_tb;
98 /* in order to optimize self modifying code, we count the number
99 of lookups we do to a given page to use a bitmap */
100 unsigned int code_write_count;
101 uint8_t *code_bitmap;
102 #if defined(CONFIG_USER_ONLY)
103 unsigned long flags;
104 #endif
105 } PageDesc;
107 typedef struct PhysPageDesc {
108 /* offset in host memory of the page + io_index in the low 12 bits */
109 uint32_t phys_offset;
110 } PhysPageDesc;
112 #define L2_BITS 10
113 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
115 #define L1_SIZE (1 << L1_BITS)
116 #define L2_SIZE (1 << L2_BITS)
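/* The page descriptor tables below are two-level: the page index (the guest
   address shifted right by TARGET_PAGE_BITS) is split into an L1 slot and an
   L2 slot.  Illustrative sketch, assuming 4 KB target pages
   (TARGET_PAGE_BITS == 12) on a 32-bit target, so L1_BITS == L2_BITS == 10:
       page_index = addr >> TARGET_PAGE_BITS;
       l1_slot    = page_index >> L2_BITS;
       l2_slot    = page_index & (L2_SIZE - 1);
   e.g. addr 0x12345678 gives page_index 0x12345, l1_slot 0x48, l2_slot 0x345. */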
118 static void io_mem_init(void);
120 unsigned long qemu_real_host_page_size;
121 unsigned long qemu_host_page_bits;
122 unsigned long qemu_host_page_size;
123 unsigned long qemu_host_page_mask;
125 /* XXX: for system emulation, it could just be an array */
126 static PageDesc *l1_map[L1_SIZE];
127 PhysPageDesc **l1_phys_map;
129 /* io memory support */
130 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
131 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
132 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
133 static int io_mem_nb;
135 /* log support */
136 char *logfilename = "/tmp/qemu.log";
137 FILE *logfile;
138 int loglevel;
140 /* statistics */
141 static int tlb_flush_count;
142 static int tb_flush_count;
143 static int tb_phys_invalidate_count;
145 static void page_init(void)
147 /* NOTE: we can always suppose that qemu_host_page_size >=
148 TARGET_PAGE_SIZE */
149 #ifdef _WIN32
151 SYSTEM_INFO system_info;
152 DWORD old_protect;
154 GetSystemInfo(&system_info);
155 qemu_real_host_page_size = system_info.dwPageSize;
157 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
158 PAGE_EXECUTE_READWRITE, &old_protect);
160 #else
161 qemu_real_host_page_size = getpagesize();
163 unsigned long start, end;
165 start = (unsigned long)code_gen_buffer;
166 start &= ~(qemu_real_host_page_size - 1);
168 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
169 end += qemu_real_host_page_size - 1;
170 end &= ~(qemu_real_host_page_size - 1);
172 mprotect((void *)start, end - start,
173 PROT_READ | PROT_WRITE | PROT_EXEC);
175 #endif
177 if (qemu_host_page_size == 0)
178 qemu_host_page_size = qemu_real_host_page_size;
179 if (qemu_host_page_size < TARGET_PAGE_SIZE)
180 qemu_host_page_size = TARGET_PAGE_SIZE;
181 qemu_host_page_bits = 0;
182 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
183 qemu_host_page_bits++;
184 qemu_host_page_mask = ~(qemu_host_page_size - 1);
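/* Illustrative values, assuming a 4096-byte host page: the loop above yields
   qemu_host_page_bits == 12 and qemu_host_page_mask == 0xfffff000, so
   (addr & qemu_host_page_mask) rounds an address down to its host page start. */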
185 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
186 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
189 static inline PageDesc *page_find_alloc(unsigned int index)
191 PageDesc **lp, *p;
193 lp = &l1_map[index >> L2_BITS];
194 p = *lp;
195 if (!p) {
196 /* allocate if not found */
197 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
198 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
199 *lp = p;
201 return p + (index & (L2_SIZE - 1));
204 static inline PageDesc *page_find(unsigned int index)
206 PageDesc *p;
208 p = l1_map[index >> L2_BITS];
209 if (!p)
210 return 0;
211 return p + (index & (L2_SIZE - 1));
214 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
216 void **lp, **p;
217 PhysPageDesc *pd;
219 p = (void **)l1_phys_map;
220 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
222 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
223 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
224 #endif
225 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
226 p = *lp;
227 if (!p) {
228 /* allocate if not found */
229 if (!alloc)
230 return NULL;
231 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
232 memset(p, 0, sizeof(void *) * L1_SIZE);
233 *lp = p;
235 #endif
236 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
237 pd = *lp;
238 if (!pd) {
239 int i;
240 /* allocate if not found */
241 if (!alloc)
242 return NULL;
243 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
244 *lp = pd;
245 for (i = 0; i < L2_SIZE; i++)
246 pd[i].phys_offset = IO_MEM_UNASSIGNED;
248 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
251 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
253 return phys_page_find_alloc(index, 0);
256 #if !defined(CONFIG_USER_ONLY)
257 static void tlb_protect_code(ram_addr_t ram_addr);
258 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
259 target_ulong vaddr);
260 #endif
262 void cpu_exec_init(CPUState *env)
264 CPUState **penv;
265 int cpu_index;
267 if (!code_gen_ptr) {
268 code_gen_ptr = code_gen_buffer;
269 page_init();
270 io_mem_init();
272 env->next_cpu = NULL;
273 penv = &first_cpu;
274 cpu_index = 0;
275 while (*penv != NULL) {
276 penv = (CPUState **)&(*penv)->next_cpu;
277 cpu_index++;
279 env->cpu_index = cpu_index;
280 *penv = env;
283 static inline void invalidate_page_bitmap(PageDesc *p)
285 if (p->code_bitmap) {
286 qemu_free(p->code_bitmap);
287 p->code_bitmap = NULL;
289 p->code_write_count = 0;
292 /* set to NULL all the 'first_tb' fields in all PageDescs */
293 static void page_flush_tb(void)
295 int i, j;
296 PageDesc *p;
298 for(i = 0; i < L1_SIZE; i++) {
299 p = l1_map[i];
300 if (p) {
301 for(j = 0; j < L2_SIZE; j++) {
302 p->first_tb = NULL;
303 invalidate_page_bitmap(p);
304 p++;
310 /* flush all the translation blocks */
311 /* XXX: tb_flush is currently not thread safe */
312 void tb_flush(CPUState *env1)
314 CPUState *env;
315 #if defined(DEBUG_FLUSH)
316 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
317 code_gen_ptr - code_gen_buffer,
318 nb_tbs,
319 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
320 #endif
321 nb_tbs = 0;
323 for(env = first_cpu; env != NULL; env = env->next_cpu) {
324 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
327 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
328 page_flush_tb();
330 code_gen_ptr = code_gen_buffer;
331 /* XXX: flush processor icache at this point if cache flush is
332 expensive */
333 tb_flush_count++;
336 #ifdef DEBUG_TB_CHECK
338 static void tb_invalidate_check(unsigned long address)
340 TranslationBlock *tb;
341 int i;
342 address &= TARGET_PAGE_MASK;
343 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
344 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
345 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
346 address >= tb->pc + tb->size)) {
347 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
348 address, (long)tb->pc, tb->size);
354 /* verify that all the pages have correct rights for code */
355 static void tb_page_check(void)
357 TranslationBlock *tb;
358 int i, flags1, flags2;
360 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
361 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
362 flags1 = page_get_flags(tb->pc);
363 flags2 = page_get_flags(tb->pc + tb->size - 1);
364 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
365 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
366 (long)tb->pc, tb->size, flags1, flags2);
372 void tb_jmp_check(TranslationBlock *tb)
374 TranslationBlock *tb1;
375 unsigned int n1;
377 /* suppress any remaining jumps to this TB */
378 tb1 = tb->jmp_first;
379 for(;;) {
380 n1 = (long)tb1 & 3;
381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
382 if (n1 == 2)
383 break;
384 tb1 = tb1->jmp_next[n1];
386 /* check end of list */
387 if (tb1 != tb) {
388 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
392 #endif
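/* Note on the pointer tagging used by the list helpers below: the per-page TB
   lists (page_next[]) and the jump lists (jmp_next[], jmp_first) store, in the
   low 2 bits of each TranslationBlock pointer, the slot (0 or 1) of the
   pointed-to TB that continues the list; the value 2 marks the end of the
   circular jump list.  Hence the recurring pattern
       n1  = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);
   which extracts the slot index (or end marker) and the real pointer.  This
   relies on TranslationBlock structures being at least 4-byte aligned. */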
394 /* invalidate one TB */
395 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
396 int next_offset)
398 TranslationBlock *tb1;
399 for(;;) {
400 tb1 = *ptb;
401 if (tb1 == tb) {
402 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
403 break;
405 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
409 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
411 TranslationBlock *tb1;
412 unsigned int n1;
414 for(;;) {
415 tb1 = *ptb;
416 n1 = (long)tb1 & 3;
417 tb1 = (TranslationBlock *)((long)tb1 & ~3);
418 if (tb1 == tb) {
419 *ptb = tb1->page_next[n1];
420 break;
422 ptb = &tb1->page_next[n1];
426 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
428 TranslationBlock *tb1, **ptb;
429 unsigned int n1;
431 ptb = &tb->jmp_next[n];
432 tb1 = *ptb;
433 if (tb1) {
434 /* find tb(n) in circular list */
435 for(;;) {
436 tb1 = *ptb;
437 n1 = (long)tb1 & 3;
438 tb1 = (TranslationBlock *)((long)tb1 & ~3);
439 if (n1 == n && tb1 == tb)
440 break;
441 if (n1 == 2) {
442 ptb = &tb1->jmp_first;
443 } else {
444 ptb = &tb1->jmp_next[n1];
447 /* now we can suppress tb(n) from the list */
448 *ptb = tb->jmp_next[n];
450 tb->jmp_next[n] = NULL;
454 /* reset the jump entry 'n' of a TB so that it is not chained to
455 another TB */
456 static inline void tb_reset_jump(TranslationBlock *tb, int n)
458 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
461 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
463 CPUState *env;
464 PageDesc *p;
465 unsigned int h, n1;
466 target_ulong phys_pc;
467 TranslationBlock *tb1, *tb2;
469 /* remove the TB from the hash list */
470 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
471 h = tb_phys_hash_func(phys_pc);
472 tb_remove(&tb_phys_hash[h], tb,
473 offsetof(TranslationBlock, phys_hash_next));
475 /* remove the TB from the page list */
476 if (tb->page_addr[0] != page_addr) {
477 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
478 tb_page_remove(&p->first_tb, tb);
479 invalidate_page_bitmap(p);
481 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
482 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
483 tb_page_remove(&p->first_tb, tb);
484 invalidate_page_bitmap(p);
487 tb_invalidated_flag = 1;
489 /* remove the TB from the hash list */
490 h = tb_jmp_cache_hash_func(tb->pc);
491 for(env = first_cpu; env != NULL; env = env->next_cpu) {
492 if (env->tb_jmp_cache[h] == tb)
493 env->tb_jmp_cache[h] = NULL;
496 /* suppress this TB from the two jump lists */
497 tb_jmp_remove(tb, 0);
498 tb_jmp_remove(tb, 1);
500 /* suppress any remaining jumps to this TB */
501 tb1 = tb->jmp_first;
502 for(;;) {
503 n1 = (long)tb1 & 3;
504 if (n1 == 2)
505 break;
506 tb1 = (TranslationBlock *)((long)tb1 & ~3);
507 tb2 = tb1->jmp_next[n1];
508 tb_reset_jump(tb1, n1);
509 tb1->jmp_next[n1] = NULL;
510 tb1 = tb2;
512 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
514 tb_phys_invalidate_count++;
517 static inline void set_bits(uint8_t *tab, int start, int len)
519 int end, mask, end1;
521 end = start + len;
522 tab += start >> 3;
523 mask = 0xff << (start & 7);
524 if ((start & ~7) == (end & ~7)) {
525 if (start < end) {
526 mask &= ~(0xff << (end & 7));
527 *tab |= mask;
529 } else {
530 *tab++ |= mask;
531 start = (start + 8) & ~7;
532 end1 = end & ~7;
533 while (start < end1) {
534 *tab++ = 0xff;
535 start += 8;
537 if (start < end) {
538 mask = ~(0xff << (end & 7));
539 *tab |= mask;
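/* Example of the helper above (hypothetical call): starting from a zeroed
   bitmap, set_bits(tab, 3, 7) marks bits 3..9 inclusive, i.e. it does
   tab[0] |= 0xf8 and tab[1] |= 0x03. */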
544 static void build_page_bitmap(PageDesc *p)
546 int n, tb_start, tb_end;
547 TranslationBlock *tb;
549 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
550 if (!p->code_bitmap)
551 return;
552 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
554 tb = p->first_tb;
555 while (tb != NULL) {
556 n = (long)tb & 3;
557 tb = (TranslationBlock *)((long)tb & ~3);
558 /* NOTE: this is subtle as a TB may span two physical pages */
559 if (n == 0) {
560 /* NOTE: tb_end may be after the end of the page, but
561 it is not a problem */
562 tb_start = tb->pc & ~TARGET_PAGE_MASK;
563 tb_end = tb_start + tb->size;
564 if (tb_end > TARGET_PAGE_SIZE)
565 tb_end = TARGET_PAGE_SIZE;
566 } else {
567 tb_start = 0;
568 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
570 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
571 tb = tb->page_next[n];
575 #ifdef TARGET_HAS_PRECISE_SMC
577 static void tb_gen_code(CPUState *env,
578 target_ulong pc, target_ulong cs_base, int flags,
579 int cflags)
581 TranslationBlock *tb;
582 uint8_t *tc_ptr;
583 target_ulong phys_pc, phys_page2, virt_page2;
584 int code_gen_size;
586 phys_pc = get_phys_addr_code(env, pc);
587 tb = tb_alloc(pc);
588 if (!tb) {
589 /* flush must be done */
590 tb_flush(env);
591 /* cannot fail at this point */
592 tb = tb_alloc(pc);
594 tc_ptr = code_gen_ptr;
595 tb->tc_ptr = tc_ptr;
596 tb->cs_base = cs_base;
597 tb->flags = flags;
598 tb->cflags = cflags;
599 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
600 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
602 /* check next page if needed */
603 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
604 phys_page2 = -1;
605 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
606 phys_page2 = get_phys_addr_code(env, virt_page2);
608 tb_link_phys(tb, phys_pc, phys_page2);
610 #endif
612 /* invalidate all TBs which intersect with the target physical page
613 starting in range [start;end[. NOTE: start and end must refer to
614 the same physical page. 'is_cpu_write_access' should be true if called
615 from a real cpu write access: the virtual CPU will exit the current
616 TB if code is modified inside this TB. */
617 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
618 int is_cpu_write_access)
620 int n, current_tb_modified, current_tb_not_found, current_flags;
621 CPUState *env = cpu_single_env;
622 PageDesc *p;
623 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
624 target_ulong tb_start, tb_end;
625 target_ulong current_pc, current_cs_base;
627 p = page_find(start >> TARGET_PAGE_BITS);
628 if (!p)
629 return;
630 if (!p->code_bitmap &&
631 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
632 is_cpu_write_access) {
633 /* build code bitmap */
634 build_page_bitmap(p);
637 /* we remove all the TBs in the range [start, end[ */
638 /* XXX: see if in some cases it could be faster to invalidate all the code */
639 current_tb_not_found = is_cpu_write_access;
640 current_tb_modified = 0;
641 current_tb = NULL; /* avoid warning */
642 current_pc = 0; /* avoid warning */
643 current_cs_base = 0; /* avoid warning */
644 current_flags = 0; /* avoid warning */
645 tb = p->first_tb;
646 while (tb != NULL) {
647 n = (long)tb & 3;
648 tb = (TranslationBlock *)((long)tb & ~3);
649 tb_next = tb->page_next[n];
650 /* NOTE: this is subtle as a TB may span two physical pages */
651 if (n == 0) {
652 /* NOTE: tb_end may be after the end of the page, but
653 it is not a problem */
654 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
655 tb_end = tb_start + tb->size;
656 } else {
657 tb_start = tb->page_addr[1];
658 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
660 if (!(tb_end <= start || tb_start >= end)) {
661 #ifdef TARGET_HAS_PRECISE_SMC
662 if (current_tb_not_found) {
663 current_tb_not_found = 0;
664 current_tb = NULL;
665 if (env->mem_write_pc) {
666 /* now we have a real cpu fault */
667 current_tb = tb_find_pc(env->mem_write_pc);
670 if (current_tb == tb &&
671 !(current_tb->cflags & CF_SINGLE_INSN)) {
672 /* If we are modifying the current TB, we must stop
673 its execution. We could be more precise by checking
674 that the modification is after the current PC, but it
675 would require a specialized function to partially
676 restore the CPU state */
678 current_tb_modified = 1;
679 cpu_restore_state(current_tb, env,
680 env->mem_write_pc, NULL);
681 #if defined(TARGET_I386)
682 current_flags = env->hflags;
683 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
684 current_cs_base = (target_ulong)env->segs[R_CS].base;
685 current_pc = current_cs_base + env->eip;
686 #else
687 #error unsupported CPU
688 #endif
690 #endif /* TARGET_HAS_PRECISE_SMC */
691 /* we need to do that to handle the case where a signal
692 occurs while doing tb_phys_invalidate() */
693 saved_tb = NULL;
694 if (env) {
695 saved_tb = env->current_tb;
696 env->current_tb = NULL;
698 tb_phys_invalidate(tb, -1);
699 if (env) {
700 env->current_tb = saved_tb;
701 if (env->interrupt_request && env->current_tb)
702 cpu_interrupt(env, env->interrupt_request);
705 tb = tb_next;
707 #if !defined(CONFIG_USER_ONLY)
708 /* if no code remaining, no need to continue to use slow writes */
709 if (!p->first_tb) {
710 invalidate_page_bitmap(p);
711 if (is_cpu_write_access) {
712 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
715 #endif
716 #ifdef TARGET_HAS_PRECISE_SMC
717 if (current_tb_modified) {
718 /* we generate a block containing just the instruction
719 modifying the memory. It will ensure that it cannot modify
720 itself */
721 env->current_tb = NULL;
722 tb_gen_code(env, current_pc, current_cs_base, current_flags,
723 CF_SINGLE_INSN);
724 cpu_resume_from_signal(env, NULL);
726 #endif
729 /* len must be <= 8 and start must be a multiple of len */
730 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
732 PageDesc *p;
733 int offset, b;
734 #if 0
735 if (1) {
736 if (loglevel) {
737 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
738 cpu_single_env->mem_write_vaddr, len,
739 cpu_single_env->eip,
740 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
743 #endif
744 p = page_find(start >> TARGET_PAGE_BITS);
745 if (!p)
746 return;
747 if (p->code_bitmap) {
748 offset = start & ~TARGET_PAGE_MASK;
749 b = p->code_bitmap[offset >> 3] >> (offset & 7);
750 if (b & ((1 << len) - 1))
751 goto do_invalidate;
752 } else {
753 do_invalidate:
754 tb_invalidate_phys_page_range(start, start + len, 1);
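/* Illustrative fast-path walk-through, assuming 4 KB target pages
   (hypothetical numbers): a 4-byte guest store at physical address 0x1234,
   on a page whose code bitmap has been built, checks bits 0x234..0x237 of
   that bitmap; only if one of them is set does it fall back to the full
   tb_invalidate_phys_page_range() scan above. */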
758 #if !defined(CONFIG_SOFTMMU)
759 static void tb_invalidate_phys_page(target_ulong addr,
760 unsigned long pc, void *puc)
762 int n, current_flags, current_tb_modified;
763 target_ulong current_pc, current_cs_base;
764 PageDesc *p;
765 TranslationBlock *tb, *current_tb;
766 #ifdef TARGET_HAS_PRECISE_SMC
767 CPUState *env = cpu_single_env;
768 #endif
770 addr &= TARGET_PAGE_MASK;
771 p = page_find(addr >> TARGET_PAGE_BITS);
772 if (!p)
773 return;
774 tb = p->first_tb;
775 current_tb_modified = 0;
776 current_tb = NULL;
777 current_pc = 0; /* avoid warning */
778 current_cs_base = 0; /* avoid warning */
779 current_flags = 0; /* avoid warning */
780 #ifdef TARGET_HAS_PRECISE_SMC
781 if (tb && pc != 0) {
782 current_tb = tb_find_pc(pc);
784 #endif
785 while (tb != NULL) {
786 n = (long)tb & 3;
787 tb = (TranslationBlock *)((long)tb & ~3);
788 #ifdef TARGET_HAS_PRECISE_SMC
789 if (current_tb == tb &&
790 !(current_tb->cflags & CF_SINGLE_INSN)) {
791 /* If we are modifying the current TB, we must stop
792 its execution. We could be more precise by checking
793 that the modification is after the current PC, but it
794 would require a specialized function to partially
795 restore the CPU state */
797 current_tb_modified = 1;
798 cpu_restore_state(current_tb, env, pc, puc);
799 #if defined(TARGET_I386)
800 current_flags = env->hflags;
801 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
802 current_cs_base = (target_ulong)env->segs[R_CS].base;
803 current_pc = current_cs_base + env->eip;
804 #else
805 #error unsupported CPU
806 #endif
808 #endif /* TARGET_HAS_PRECISE_SMC */
809 tb_phys_invalidate(tb, addr);
810 tb = tb->page_next[n];
812 p->first_tb = NULL;
813 #ifdef TARGET_HAS_PRECISE_SMC
814 if (current_tb_modified) {
815 /* we generate a block containing just the instruction
816 modifying the memory. It will ensure that it cannot modify
817 itself */
818 env->current_tb = NULL;
819 tb_gen_code(env, current_pc, current_cs_base, current_flags,
820 CF_SINGLE_INSN);
821 cpu_resume_from_signal(env, puc);
823 #endif
825 #endif
827 /* add the tb in the target page and protect it if necessary */
828 static inline void tb_alloc_page(TranslationBlock *tb,
829 unsigned int n, target_ulong page_addr)
831 PageDesc *p;
832 TranslationBlock *last_first_tb;
834 tb->page_addr[n] = page_addr;
835 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
836 tb->page_next[n] = p->first_tb;
837 last_first_tb = p->first_tb;
838 p->first_tb = (TranslationBlock *)((long)tb | n);
839 invalidate_page_bitmap(p);
841 #if defined(TARGET_HAS_SMC) || 1
843 #if defined(CONFIG_USER_ONLY)
844 if (p->flags & PAGE_WRITE) {
845 target_ulong addr;
846 PageDesc *p2;
847 int prot;
849 /* force the host page as non writable (writes will have a
850 page fault + mprotect overhead) */
851 page_addr &= qemu_host_page_mask;
852 prot = 0;
853 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
854 addr += TARGET_PAGE_SIZE) {
856 p2 = page_find (addr >> TARGET_PAGE_BITS);
857 if (!p2)
858 continue;
859 prot |= p2->flags;
860 p2->flags &= ~PAGE_WRITE;
861 page_get_flags(addr);
863 mprotect(g2h(page_addr), qemu_host_page_size,
864 (prot & PAGE_BITS) & ~PAGE_WRITE);
865 #ifdef DEBUG_TB_INVALIDATE
866 printf("protecting code page: 0x%08lx\n",
867 page_addr);
868 #endif
870 #else
871 /* if some code is already present, then the pages are already
872 protected. So we handle the case where only the first TB is
873 allocated in a physical page */
874 if (!last_first_tb) {
875 tlb_protect_code(page_addr);
877 #endif
879 #endif /* TARGET_HAS_SMC */
882 /* Allocate a new translation block. Flush the translation buffer if
883 too many translation blocks or too much generated code. */
884 TranslationBlock *tb_alloc(target_ulong pc)
886 TranslationBlock *tb;
888 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
889 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
890 return NULL;
891 tb = &tbs[nb_tbs++];
892 tb->pc = pc;
893 tb->cflags = 0;
894 return tb;
897 /* add a new TB and link it to the physical page tables. phys_page2 is
898 (-1) to indicate that only one page contains the TB. */
899 void tb_link_phys(TranslationBlock *tb,
900 target_ulong phys_pc, target_ulong phys_page2)
902 unsigned int h;
903 TranslationBlock **ptb;
905 /* add in the physical hash table */
906 h = tb_phys_hash_func(phys_pc);
907 ptb = &tb_phys_hash[h];
908 tb->phys_hash_next = *ptb;
909 *ptb = tb;
911 /* add in the page list */
912 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
913 if (phys_page2 != -1)
914 tb_alloc_page(tb, 1, phys_page2);
915 else
916 tb->page_addr[1] = -1;
918 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
919 tb->jmp_next[0] = NULL;
920 tb->jmp_next[1] = NULL;
921 #ifdef USE_CODE_COPY
922 tb->cflags &= ~CF_FP_USED;
923 if (tb->cflags & CF_TB_FP_USED)
924 tb->cflags |= CF_FP_USED;
925 #endif
927 /* init original jump addresses */
928 if (tb->tb_next_offset[0] != 0xffff)
929 tb_reset_jump(tb, 0);
930 if (tb->tb_next_offset[1] != 0xffff)
931 tb_reset_jump(tb, 1);
933 #ifdef DEBUG_TB_CHECK
934 tb_page_check();
935 #endif
938 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
939 tb[1].tc_ptr. Return NULL if not found */
940 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
942 int m_min, m_max, m;
943 unsigned long v;
944 TranslationBlock *tb;
946 if (nb_tbs <= 0)
947 return NULL;
948 if (tc_ptr < (unsigned long)code_gen_buffer ||
949 tc_ptr >= (unsigned long)code_gen_ptr)
950 return NULL;
951 /* binary search (cf Knuth) */
952 m_min = 0;
953 m_max = nb_tbs - 1;
954 while (m_min <= m_max) {
955 m = (m_min + m_max) >> 1;
956 tb = &tbs[m];
957 v = (unsigned long)tb->tc_ptr;
958 if (v == tc_ptr)
959 return tb;
960 else if (tc_ptr < v) {
961 m_max = m - 1;
962 } else {
963 m_min = m + 1;
966 return &tbs[m_max];
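/* Example (hypothetical addresses): if tbs[] holds blocks whose tc_ptr values
   are 0x1000, 0x1400 and 0x1900, tb_find_pc(0x1420) returns the block at
   0x1400, i.e. the TB whose generated code contains that host PC. */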
969 static void tb_reset_jump_recursive(TranslationBlock *tb);
971 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
973 TranslationBlock *tb1, *tb_next, **ptb;
974 unsigned int n1;
976 tb1 = tb->jmp_next[n];
977 if (tb1 != NULL) {
978 /* find head of list */
979 for(;;) {
980 n1 = (long)tb1 & 3;
981 tb1 = (TranslationBlock *)((long)tb1 & ~3);
982 if (n1 == 2)
983 break;
984 tb1 = tb1->jmp_next[n1];
986 /* we are now sure that tb jumps to tb1 */
987 tb_next = tb1;
989 /* remove tb from the jmp_first list */
990 ptb = &tb_next->jmp_first;
991 for(;;) {
992 tb1 = *ptb;
993 n1 = (long)tb1 & 3;
994 tb1 = (TranslationBlock *)((long)tb1 & ~3);
995 if (n1 == n && tb1 == tb)
996 break;
997 ptb = &tb1->jmp_next[n1];
999 *ptb = tb->jmp_next[n];
1000 tb->jmp_next[n] = NULL;
1002 /* suppress the jump to next tb in generated code */
1003 tb_reset_jump(tb, n);
1005 /* suppress jumps in the tb on which we could have jumped */
1006 tb_reset_jump_recursive(tb_next);
1010 static void tb_reset_jump_recursive(TranslationBlock *tb)
1012 tb_reset_jump_recursive2(tb, 0);
1013 tb_reset_jump_recursive2(tb, 1);
1016 #if defined(TARGET_HAS_ICE)
1017 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1019 target_ulong addr, pd;
1020 ram_addr_t ram_addr;
1021 PhysPageDesc *p;
1023 addr = cpu_get_phys_page_debug(env, pc);
1024 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1025 if (!p) {
1026 pd = IO_MEM_UNASSIGNED;
1027 } else {
1028 pd = p->phys_offset;
1030 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1031 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1033 #endif
1035 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1036 breakpoint is reached */
1037 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1039 #if defined(TARGET_HAS_ICE)
1040 int i;
1042 for(i = 0; i < env->nb_breakpoints; i++) {
1043 if (env->breakpoints[i] == pc)
1044 return 0;
1047 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1048 return -1;
1049 env->breakpoints[env->nb_breakpoints++] = pc;
1051 #ifdef USE_KVM
1052 if (kvm_allowed)
1053 kvm_update_debugger(env);
1054 #endif
1056 breakpoint_invalidate(env, pc);
1057 return 0;
1058 #else
1059 return -1;
1060 #endif
1063 /* remove a breakpoint */
1064 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1066 #if defined(TARGET_HAS_ICE)
1067 int i;
1068 for(i = 0; i < env->nb_breakpoints; i++) {
1069 if (env->breakpoints[i] == pc)
1070 goto found;
1072 return -1;
1073 found:
1074 env->nb_breakpoints--;
1075 if (i < env->nb_breakpoints)
1076 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1078 #ifdef USE_KVM
1079 if (kvm_allowed)
1080 kvm_update_debugger(env);
1081 #endif
1083 breakpoint_invalidate(env, pc);
1084 return 0;
1085 #else
1086 return -1;
1087 #endif
1090 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1091 CPU loop after each instruction */
1092 void cpu_single_step(CPUState *env, int enabled)
1094 #if defined(TARGET_HAS_ICE)
1095 if (env->singlestep_enabled != enabled) {
1096 env->singlestep_enabled = enabled;
1097 /* must flush all the translated code to avoid inconsistencies */
1098 /* XXX: only flush what is necessary */
1099 tb_flush(env);
1101 #ifdef USE_KVM
1102 if (kvm_allowed)
1103 kvm_update_debugger(env);
1104 #endif
1105 #endif
1108 /* enable or disable low-level logging */
1109 void cpu_set_log(int log_flags)
1111 loglevel = log_flags;
1112 if (loglevel && !logfile) {
1113 logfile = fopen(logfilename, "w");
1114 if (!logfile) {
1115 perror(logfilename);
1116 _exit(1);
1118 #if !defined(CONFIG_SOFTMMU)
1119 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1121 static uint8_t logfile_buf[4096];
1122 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1124 #else
1125 setvbuf(logfile, NULL, _IOLBF, 0);
1126 #endif
1130 void cpu_set_log_filename(const char *filename)
1132 logfilename = strdup(filename);
1135 /* mask must never be zero, except for A20 change call */
1136 void cpu_interrupt(CPUState *env, int mask)
1138 TranslationBlock *tb;
1139 static int interrupt_lock;
1141 env->interrupt_request |= mask;
1142 /* if the cpu is currently executing code, we must unlink it and
1143 all the potentially executing TBs */
1144 tb = env->current_tb;
1145 if (tb && !testandset(&interrupt_lock)) {
1146 env->current_tb = NULL;
1147 tb_reset_jump_recursive(tb);
1148 interrupt_lock = 0;
1152 void cpu_reset_interrupt(CPUState *env, int mask)
1154 env->interrupt_request &= ~mask;
1157 CPULogItem cpu_log_items[] = {
1158 { CPU_LOG_TB_OUT_ASM, "out_asm",
1159 "show generated host assembly code for each compiled TB" },
1160 { CPU_LOG_TB_IN_ASM, "in_asm",
1161 "show target assembly code for each compiled TB" },
1162 { CPU_LOG_TB_OP, "op",
1163 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1164 #ifdef TARGET_I386
1165 { CPU_LOG_TB_OP_OPT, "op_opt",
1166 "show micro ops after optimization for each compiled TB" },
1167 #endif
1168 { CPU_LOG_INT, "int",
1169 "show interrupts/exceptions in short format" },
1170 { CPU_LOG_EXEC, "exec",
1171 "show trace before each executed TB (lots of logs)" },
1172 { CPU_LOG_TB_CPU, "cpu",
1173 "show CPU state before bloc translation" },
1174 #ifdef TARGET_I386
1175 { CPU_LOG_PCALL, "pcall",
1176 "show protected mode far calls/returns/exceptions" },
1177 #endif
1178 #ifdef DEBUG_IOPORT
1179 { CPU_LOG_IOPORT, "ioport",
1180 "show all i/o ports accesses" },
1181 #endif
1182 { 0, NULL, NULL },
1185 static int cmp1(const char *s1, int n, const char *s2)
1187 if (strlen(s2) != n)
1188 return 0;
1189 return memcmp(s1, s2, n) == 0;
1192 /* takes a comma-separated list of log masks. Returns 0 on error. */
1193 int cpu_str_to_log_mask(const char *str)
1195 CPULogItem *item;
1196 int mask;
1197 const char *p, *p1;
1199 p = str;
1200 mask = 0;
1201 for(;;) {
1202 p1 = strchr(p, ',');
1203 if (!p1)
1204 p1 = p + strlen(p);
1205 if(cmp1(p,p1-p,"all")) {
1206 for(item = cpu_log_items; item->mask != 0; item++) {
1207 mask |= item->mask;
1209 } else {
1210 for(item = cpu_log_items; item->mask != 0; item++) {
1211 if (cmp1(p, p1 - p, item->name))
1212 goto found;
1214 return 0;
1216 found:
1217 mask |= item->mask;
1218 if (*p1 != ',')
1219 break;
1220 p = p1 + 1;
1222 return mask;
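/* Usage sketch (hypothetical input): cpu_str_to_log_mask("in_asm,cpu")
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, cpu_str_to_log_mask("all")
   ORs together every entry of cpu_log_items[], and an unknown name makes
   the function return 0. */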
1225 void cpu_abort(CPUState *env, const char *fmt, ...)
1227 va_list ap;
1229 va_start(ap, fmt);
1230 fprintf(stderr, "qemu: fatal: ");
1231 vfprintf(stderr, fmt, ap);
1232 fprintf(stderr, "\n");
1233 #ifdef TARGET_I386
1234 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1235 #else
1236 cpu_dump_state(env, stderr, fprintf, 0);
1237 #endif
1238 va_end(ap);
1239 abort();
1242 #if !defined(CONFIG_USER_ONLY)
1244 /* NOTE: if flush_global is true, also flush global entries (not
1245 implemented yet) */
1246 void tlb_flush(CPUState *env, int flush_global)
1248 int i;
1250 #if defined(DEBUG_TLB)
1251 printf("tlb_flush:\n");
1252 #endif
1253 /* must reset current TB so that interrupts cannot modify the
1254 links while we are modifying them */
1255 env->current_tb = NULL;
1257 for(i = 0; i < CPU_TLB_SIZE; i++) {
1258 env->tlb_table[0][i].addr_read = -1;
1259 env->tlb_table[0][i].addr_write = -1;
1260 env->tlb_table[0][i].addr_code = -1;
1261 env->tlb_table[1][i].addr_read = -1;
1262 env->tlb_table[1][i].addr_write = -1;
1263 env->tlb_table[1][i].addr_code = -1;
1266 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1268 #if !defined(CONFIG_SOFTMMU)
1269 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1270 #endif
1271 #ifdef USE_KQEMU
1272 if (env->kqemu_enabled) {
1273 kqemu_flush(env, flush_global);
1275 #endif
1276 tlb_flush_count++;
1279 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1281 if (addr == (tlb_entry->addr_read &
1282 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1283 addr == (tlb_entry->addr_write &
1284 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1285 addr == (tlb_entry->addr_code &
1286 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1287 tlb_entry->addr_read = -1;
1288 tlb_entry->addr_write = -1;
1289 tlb_entry->addr_code = -1;
1293 void tlb_flush_page(CPUState *env, target_ulong addr)
1295 int i;
1296 TranslationBlock *tb;
1298 #if defined(DEBUG_TLB)
1299 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1300 #endif
1301 /* must reset current TB so that interrupts cannot modify the
1302 links while we are modifying them */
1303 env->current_tb = NULL;
1305 addr &= TARGET_PAGE_MASK;
1306 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1307 tlb_flush_entry(&env->tlb_table[0][i], addr);
1308 tlb_flush_entry(&env->tlb_table[1][i], addr);
1310 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1311 tb = env->tb_jmp_cache[i];
1312 if (tb &&
1313 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1314 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1315 env->tb_jmp_cache[i] = NULL;
1319 #if !defined(CONFIG_SOFTMMU)
1320 if (addr < MMAP_AREA_END)
1321 munmap((void *)addr, TARGET_PAGE_SIZE);
1322 #endif
1323 #ifdef USE_KQEMU
1324 if (env->kqemu_enabled) {
1325 kqemu_flush_page(env, addr);
1327 #endif
1330 /* update the TLBs so that writes to code in the virtual page 'addr'
1331 can be detected */
1332 static void tlb_protect_code(ram_addr_t ram_addr)
1334 cpu_physical_memory_reset_dirty(ram_addr,
1335 ram_addr + TARGET_PAGE_SIZE,
1336 CODE_DIRTY_FLAG);
1339 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1340 tested for self modifying code */
1341 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1342 target_ulong vaddr)
1344 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1347 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1348 unsigned long start, unsigned long length)
1350 unsigned long addr;
1351 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1352 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1353 if ((addr - start) < length) {
1354 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1359 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1360 int dirty_flags)
1362 CPUState *env;
1363 unsigned long length, start1;
1364 int i, mask, len;
1365 uint8_t *p;
1367 start &= TARGET_PAGE_MASK;
1368 end = TARGET_PAGE_ALIGN(end);
1370 length = end - start;
1371 if (length == 0)
1372 return;
1373 len = length >> TARGET_PAGE_BITS;
1374 #ifdef USE_KQEMU
1375 /* XXX: should not depend on cpu context */
1376 env = first_cpu;
1377 if (env->kqemu_enabled) {
1378 ram_addr_t addr;
1379 addr = start;
1380 for(i = 0; i < len; i++) {
1381 kqemu_set_notdirty(env, addr);
1382 addr += TARGET_PAGE_SIZE;
1385 #endif
1386 mask = ~dirty_flags;
1387 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1388 for(i = 0; i < len; i++)
1389 p[i] &= mask;
1391 /* we modify the TLB cache so that the dirty bit will be set again
1392 when accessing the range */
1393 start1 = start + (unsigned long)phys_ram_base;
1394 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1395 for(i = 0; i < CPU_TLB_SIZE; i++)
1396 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1397 for(i = 0; i < CPU_TLB_SIZE; i++)
1398 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1401 #if !defined(CONFIG_SOFTMMU)
1402 /* XXX: this is expensive */
1404 VirtPageDesc *p;
1405 int j;
1406 target_ulong addr;
1408 for(i = 0; i < L1_SIZE; i++) {
1409 p = l1_virt_map[i];
1410 if (p) {
1411 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1412 for(j = 0; j < L2_SIZE; j++) {
1413 if (p->valid_tag == virt_valid_tag &&
1414 p->phys_addr >= start && p->phys_addr < end &&
1415 (p->prot & PROT_WRITE)) {
1416 if (addr < MMAP_AREA_END) {
1417 mprotect((void *)addr, TARGET_PAGE_SIZE,
1418 p->prot & ~PROT_WRITE);
1421 addr += TARGET_PAGE_SIZE;
1422 p++;
1427 #endif
1430 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1432 ram_addr_t ram_addr;
1434 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1435 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1436 tlb_entry->addend - (unsigned long)phys_ram_base;
1437 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1438 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1443 /* update the TLB according to the current state of the dirty bits */
1444 void cpu_tlb_update_dirty(CPUState *env)
1446 int i;
1447 for(i = 0; i < CPU_TLB_SIZE; i++)
1448 tlb_update_dirty(&env->tlb_table[0][i]);
1449 for(i = 0; i < CPU_TLB_SIZE; i++)
1450 tlb_update_dirty(&env->tlb_table[1][i]);
1453 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1454 unsigned long start)
1456 unsigned long addr;
1457 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1458 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1459 if (addr == start) {
1460 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1465 /* update the TLB corresponding to virtual page vaddr and phys addr
1466 addr so that it is no longer dirty */
1467 static inline void tlb_set_dirty(CPUState *env,
1468 unsigned long addr, target_ulong vaddr)
1470 int i;
1472 addr &= TARGET_PAGE_MASK;
1473 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1474 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1475 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1478 /* add a new TLB entry. At most one entry for a given virtual address
1479 is permitted. Return 0 if OK or 2 if the page could not be mapped
1480 (can only happen in non SOFTMMU mode for I/O pages or pages
1481 conflicting with the host address space). */
1482 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1483 target_phys_addr_t paddr, int prot,
1484 int is_user, int is_softmmu)
1486 PhysPageDesc *p;
1487 unsigned long pd;
1488 unsigned int index;
1489 target_ulong address;
1490 target_phys_addr_t addend;
1491 int ret;
1492 CPUTLBEntry *te;
1494 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1495 if (!p) {
1496 pd = IO_MEM_UNASSIGNED;
1497 } else {
1498 pd = p->phys_offset;
1500 #if defined(DEBUG_TLB)
1501 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1502 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1503 #endif
1505 ret = 0;
1506 #if !defined(CONFIG_SOFTMMU)
1507 if (is_softmmu)
1508 #endif
1510 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1511 /* IO memory case */
1512 address = vaddr | pd;
1513 addend = paddr;
1514 } else {
1515 /* standard memory */
1516 address = vaddr;
1517 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1520 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1521 addend -= vaddr;
1522 te = &env->tlb_table[is_user][index];
1523 te->addend = addend;
1524 if (prot & PAGE_READ) {
1525 te->addr_read = address;
1526 } else {
1527 te->addr_read = -1;
1529 if (prot & PAGE_EXEC) {
1530 te->addr_code = address;
1531 } else {
1532 te->addr_code = -1;
1534 if (prot & PAGE_WRITE) {
1535 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1536 (pd & IO_MEM_ROMD)) {
1537 /* write access calls the I/O callback */
1538 te->addr_write = vaddr |
1539 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1540 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1541 !cpu_physical_memory_is_dirty(pd)) {
1542 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1543 } else {
1544 te->addr_write = address;
1546 } else {
1547 te->addr_write = -1;
1550 #if !defined(CONFIG_SOFTMMU)
1551 else {
1552 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1553 /* IO access: no mapping is done as it will be handled by the
1554 soft MMU */
1555 if (!(env->hflags & HF_SOFTMMU_MASK))
1556 ret = 2;
1557 } else {
1558 void *map_addr;
1560 if (vaddr >= MMAP_AREA_END) {
1561 ret = 2;
1562 } else {
1563 if (prot & PROT_WRITE) {
1564 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1565 #if defined(TARGET_HAS_SMC) || 1
1566 first_tb ||
1567 #endif
1568 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1569 !cpu_physical_memory_is_dirty(pd))) {
1570 /* ROM: we do as if code was inside */
1571 /* if code is present, we only map as read only and save the
1572 original mapping */
1573 VirtPageDesc *vp;
1575 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1576 vp->phys_addr = pd;
1577 vp->prot = prot;
1578 vp->valid_tag = virt_valid_tag;
1579 prot &= ~PAGE_WRITE;
1582 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1583 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1584 if (map_addr == MAP_FAILED) {
1585 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1586 paddr, vaddr);
1591 #endif
1592 return ret;
1595 /* called from signal handler: invalidate the code and unprotect the
1596 page. Return TRUE if the fault was successfully handled. */
1597 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1599 #if !defined(CONFIG_SOFTMMU)
1600 VirtPageDesc *vp;
1602 #if defined(DEBUG_TLB)
1603 printf("page_unprotect: addr=0x%08x\n", addr);
1604 #endif
1605 addr &= TARGET_PAGE_MASK;
1607 /* if it is not mapped, no need to worry here */
1608 if (addr >= MMAP_AREA_END)
1609 return 0;
1610 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1611 if (!vp)
1612 return 0;
1613 /* NOTE: in this case, validate_tag is _not_ tested as it
1614 validates only the code TLB */
1615 if (vp->valid_tag != virt_valid_tag)
1616 return 0;
1617 if (!(vp->prot & PAGE_WRITE))
1618 return 0;
1619 #if defined(DEBUG_TLB)
1620 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1621 addr, vp->phys_addr, vp->prot);
1622 #endif
1623 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1624 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1625 (unsigned long)addr, vp->prot);
1626 /* set the dirty bit */
1627 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1628 /* flush the code inside */
1629 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1630 return 1;
1631 #else
1632 return 0;
1633 #endif
1636 #else
1638 void tlb_flush(CPUState *env, int flush_global)
1642 void tlb_flush_page(CPUState *env, target_ulong addr)
1646 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1647 target_phys_addr_t paddr, int prot,
1648 int is_user, int is_softmmu)
1650 return 0;
1653 /* dump memory mappings */
1654 void page_dump(FILE *f)
1656 unsigned long start, end;
1657 int i, j, prot, prot1;
1658 PageDesc *p;
1660 fprintf(f, "%-8s %-8s %-8s %s\n",
1661 "start", "end", "size", "prot");
1662 start = -1;
1663 end = -1;
1664 prot = 0;
1665 for(i = 0; i <= L1_SIZE; i++) {
1666 if (i < L1_SIZE)
1667 p = l1_map[i];
1668 else
1669 p = NULL;
1670 for(j = 0;j < L2_SIZE; j++) {
1671 if (!p)
1672 prot1 = 0;
1673 else
1674 prot1 = p[j].flags;
1675 if (prot1 != prot) {
1676 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1677 if (start != -1) {
1678 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1679 start, end, end - start,
1680 prot & PAGE_READ ? 'r' : '-',
1681 prot & PAGE_WRITE ? 'w' : '-',
1682 prot & PAGE_EXEC ? 'x' : '-');
1684 if (prot1 != 0)
1685 start = end;
1686 else
1687 start = -1;
1688 prot = prot1;
1690 if (!p)
1691 break;
1696 int page_get_flags(target_ulong address)
1698 PageDesc *p;
1700 p = page_find(address >> TARGET_PAGE_BITS);
1701 if (!p)
1702 return 0;
1703 return p->flags;
1706 /* modify the flags of a page and invalidate the code if
1707 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1708 depending on PAGE_WRITE */
1709 void page_set_flags(target_ulong start, target_ulong end, int flags)
1711 PageDesc *p;
1712 target_ulong addr;
1714 start = start & TARGET_PAGE_MASK;
1715 end = TARGET_PAGE_ALIGN(end);
1716 if (flags & PAGE_WRITE)
1717 flags |= PAGE_WRITE_ORG;
1718 spin_lock(&tb_lock);
1719 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1720 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1721 /* if the write protection is set, then we invalidate the code
1722 inside */
1723 if (!(p->flags & PAGE_WRITE) &&
1724 (flags & PAGE_WRITE) &&
1725 p->first_tb) {
1726 tb_invalidate_phys_page(addr, 0, NULL);
1728 p->flags = flags;
1730 spin_unlock(&tb_lock);
1733 /* called from signal handler: invalidate the code and unprotect the
1734 page. Return TRUE if the fault was successfully handled. */
1735 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1737 unsigned int page_index, prot, pindex;
1738 PageDesc *p, *p1;
1739 target_ulong host_start, host_end, addr;
1741 host_start = address & qemu_host_page_mask;
1742 page_index = host_start >> TARGET_PAGE_BITS;
1743 p1 = page_find(page_index);
1744 if (!p1)
1745 return 0;
1746 host_end = host_start + qemu_host_page_size;
1747 p = p1;
1748 prot = 0;
1749 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1750 prot |= p->flags;
1751 p++;
1753 /* if the page was really writable, then we change its
1754 protection back to writable */
1755 if (prot & PAGE_WRITE_ORG) {
1756 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1757 if (!(p1[pindex].flags & PAGE_WRITE)) {
1758 mprotect((void *)g2h(host_start), qemu_host_page_size,
1759 (prot & PAGE_BITS) | PAGE_WRITE);
1760 p1[pindex].flags |= PAGE_WRITE;
1761 /* and since the content will be modified, we must invalidate
1762 the corresponding translated code. */
1763 tb_invalidate_phys_page(address, pc, puc);
1764 #ifdef DEBUG_TB_CHECK
1765 tb_invalidate_check(address);
1766 #endif
1767 return 1;
1770 return 0;
1773 /* call this function when system calls directly modify a memory area */
1774 /* ??? This should be redundant now that we have lock_user. */
1775 void page_unprotect_range(target_ulong data, target_ulong data_size)
1777 target_ulong start, end, addr;
1779 start = data;
1780 end = start + data_size;
1781 start &= TARGET_PAGE_MASK;
1782 end = TARGET_PAGE_ALIGN(end);
1783 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1784 page_unprotect(addr, 0, NULL);
1788 static inline void tlb_set_dirty(CPUState *env,
1789 unsigned long addr, target_ulong vaddr)
1792 #endif /* defined(CONFIG_USER_ONLY) */
1794 /* register physical memory. 'size' must be a multiple of the target
1795 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1796 io memory page */
1797 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1798 unsigned long size,
1799 unsigned long phys_offset)
1801 target_phys_addr_t addr, end_addr;
1802 PhysPageDesc *p;
1803 CPUState *env;
1805 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1806 end_addr = start_addr + size;
1807 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1808 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1809 p->phys_offset = phys_offset;
1810 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1811 (phys_offset & IO_MEM_ROMD))
1812 phys_offset += TARGET_PAGE_SIZE;
1815 /* since each CPU stores ram addresses in its TLB cache, we must
1816 reset the modified entries */
1817 /* XXX: slow ! */
1818 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1819 tlb_flush(env, 1);
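/* Usage sketch for the call above (hypothetical board, illustrative
   addresses and offsets only):
       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0xfffe0000, 0x20000,
                                    bios_offset | IO_MEM_ROM);
   i.e. RAM is mapped at guest address 0 with phys_offset 0 (the start of
   phys_ram), and a BIOS image, assumed here to live at phys_ram offset
   'bios_offset', is mapped just below 4 GB with the IO_MEM_ROM bits set. */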
1823 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1825 return 0;
1828 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1832 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1833 unassigned_mem_readb,
1834 unassigned_mem_readb,
1835 unassigned_mem_readb,
1838 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1839 unassigned_mem_writeb,
1840 unassigned_mem_writeb,
1841 unassigned_mem_writeb,
1844 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1846 unsigned long ram_addr;
1847 int dirty_flags;
1848 ram_addr = addr - (unsigned long)phys_ram_base;
1849 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1850 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1851 #if !defined(CONFIG_USER_ONLY)
1852 tb_invalidate_phys_page_fast(ram_addr, 1);
1853 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1854 #endif
1856 stb_p((uint8_t *)(long)addr, val);
1857 #ifdef USE_KQEMU
1858 if (cpu_single_env->kqemu_enabled &&
1859 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1860 kqemu_modify_page(cpu_single_env, ram_addr);
1861 #endif
1862 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1863 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1864 /* we remove the notdirty callback only if the code has been
1865 flushed */
1866 if (dirty_flags == 0xff)
1867 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1870 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1872 unsigned long ram_addr;
1873 int dirty_flags;
1874 ram_addr = addr - (unsigned long)phys_ram_base;
1875 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1876 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1877 #if !defined(CONFIG_USER_ONLY)
1878 tb_invalidate_phys_page_fast(ram_addr, 2);
1879 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1880 #endif
1882 stw_p((uint8_t *)(long)addr, val);
1883 #ifdef USE_KQEMU
1884 if (cpu_single_env->kqemu_enabled &&
1885 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1886 kqemu_modify_page(cpu_single_env, ram_addr);
1887 #endif
1888 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1889 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1890 /* we remove the notdirty callback only if the code has been
1891 flushed */
1892 if (dirty_flags == 0xff)
1893 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1896 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1898 unsigned long ram_addr;
1899 int dirty_flags;
1900 ram_addr = addr - (unsigned long)phys_ram_base;
1901 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1902 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1903 #if !defined(CONFIG_USER_ONLY)
1904 tb_invalidate_phys_page_fast(ram_addr, 4);
1905 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1906 #endif
1908 stl_p((uint8_t *)(long)addr, val);
1909 #ifdef USE_KQEMU
1910 if (cpu_single_env->kqemu_enabled &&
1911 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1912 kqemu_modify_page(cpu_single_env, ram_addr);
1913 #endif
1914 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1915 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1916 /* we remove the notdirty callback only if the code has been
1917 flushed */
1918 if (dirty_flags == 0xff)
1919 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1922 static CPUReadMemoryFunc *error_mem_read[3] = {
1923 NULL, /* never used */
1924 NULL, /* never used */
1925 NULL, /* never used */
1928 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1929 notdirty_mem_writeb,
1930 notdirty_mem_writew,
1931 notdirty_mem_writel,
1934 static void io_mem_init(void)
1936 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1937 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1938 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1939 io_mem_nb = 5;
1941 /* alloc dirty bits array */
1942 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1943 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1946 /* mem_read and mem_write are arrays of the functions used to access
1947 bytes (index 0), words (index 1) and dwords (index 2). All functions
1948 must be supplied. If io_index is non-zero, the
1949 corresponding io zone is modified. If it is zero, a new io zone is
1950 allocated. The return value can be used with
1951 cpu_register_physical_memory(). (-1) is returned if error. */
1952 int cpu_register_io_memory(int io_index,
1953 CPUReadMemoryFunc **mem_read,
1954 CPUWriteMemoryFunc **mem_write,
1955 void *opaque)
1957 int i;
1959 if (io_index <= 0) {
1960 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1961 return -1;
1962 io_index = io_mem_nb++;
1963 } else {
1964 if (io_index >= IO_MEM_NB_ENTRIES)
1965 return -1;
1968 for(i = 0;i < 3; i++) {
1969 io_mem_read[io_index][i] = mem_read[i];
1970 io_mem_write[io_index][i] = mem_write[i];
1972 io_mem_opaque[io_index] = opaque;
1973 return io_index << IO_MEM_SHIFT;
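/* Usage sketch (hypothetical device model): the caller supplies three read
   and three write handlers (byte, word, long) and keeps the returned token:
       static CPUReadMemoryFunc *mydev_read[3]   = { rd_b, rd_w, rd_l };
       static CPUWriteMemoryFunc *mydev_write[3] = { wr_b, wr_w, wr_l };
       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
   where mydev_*, rd_*, wr_*, s and the address are illustrative names only. */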
1976 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1978 return io_mem_write[io_index >> IO_MEM_SHIFT];
1981 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1983 return io_mem_read[io_index >> IO_MEM_SHIFT];
1986 /* physical memory access (slow version, mainly for debug) */
1987 #if defined(CONFIG_USER_ONLY)
1988 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1989 int len, int is_write)
1991 int l, flags;
1992 target_ulong page;
1993 void * p;
1995 while (len > 0) {
1996 page = addr & TARGET_PAGE_MASK;
1997 l = (page + TARGET_PAGE_SIZE) - addr;
1998 if (l > len)
1999 l = len;
2000 flags = page_get_flags(page);
2001 if (!(flags & PAGE_VALID))
2002 return;
2003 if (is_write) {
2004 if (!(flags & PAGE_WRITE))
2005 return;
2006 p = lock_user(addr, len, 0);
2007 memcpy(p, buf, len);
2008 unlock_user(p, addr, len);
2009 } else {
2010 if (!(flags & PAGE_READ))
2011 return;
2012 p = lock_user(addr, len, 1);
2013 memcpy(buf, p, len);
2014 unlock_user(p, addr, 0);
2016 len -= l;
2017 buf += l;
2018 addr += l;
2022 #else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

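/* Note: most callers go through the cpu_physical_memory_read() and
   cpu_physical_memory_write() wrappers (as the ld*_phys/st*_phys
   helpers below do), for example to perform a DMA-style copy into
   guest RAM. Sketch only; dma_addr, pkt and check_buf are hypothetical:

       cpu_physical_memory_write(dma_addr, pkt->data, pkt->len);
       cpu_physical_memory_read(dma_addr, check_buf, pkt->len);
*/
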
/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

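/* Note: unlike cpu_physical_memory_rw(), this helper also stores into
   pages registered as ROM (and ROMD regions), which is what BIOS and
   option ROM loading needs. Sketch only; rom_base, rom_buf and rom_size
   are hypothetical:

       cpu_physical_memory_write_rom(rom_base, rom_buf, rom_size);
*/
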
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

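/* Usage sketch (illustrative only): the ld*_phys() helpers are handy for
   reading guest physical memory directly, e.g. when walking guest page
   tables or in-memory descriptor rings; pte_addr and the present-bit
   test are hypothetical:

       uint32_t pte = ldl_phys(pte_addr);
       int present = pte & 1;
*/
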
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

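/* Usage sketch (illustrative only): MMU emulation can use this to update
   accessed/dirty bits in a guest PTE without flagging the backing page
   as modified, in line with the comment above. pte_addr and
   PG_ACCESSED_MASK are assumptions borrowed from the x86 target:

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/
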
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

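/* Usage sketch (illustrative only): debugger paths such as the gdb stub
   typically read and write guest virtual memory through this helper.
   env, pc and insn are hypothetical caller state:

       uint8_t insn[4];
       int err = cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0);
*/
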
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

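/* Usage sketch (illustrative only): this is usually reached through the
   monitor's "info jit" command, but any FILE pointer plus an
   fprintf-compatible callback works:

       dump_exec_info(stderr, fprintf);
*/
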
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif