Add the kvm utility script to the rpm so developers can easily work with it
[qemu-kvm/fedora.git] / exec.c
blob 8f7f3f6dca8d944f09e6920ecd26b536220f4d1e
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
 18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 //#define DEBUG_TLB_CHECK
49 #if !defined(CONFIG_USER_ONLY)
50 /* TB consistency checks only implemented for usermode emulation. */
51 #undef DEBUG_TB_CHECK
52 #endif
54 /* threshold to flush the translated code buffer */
55 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57 #define SMC_BITMAP_USE_THRESHOLD 10
59 #define MMAP_AREA_START 0x00000000
60 #define MMAP_AREA_END 0xa8000000
62 #if defined(TARGET_SPARC64)
63 #define TARGET_PHYS_ADDR_SPACE_BITS 41
64 #elif defined(TARGET_PPC64)
65 #define TARGET_PHYS_ADDR_SPACE_BITS 42
66 #else
67 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
68 #define TARGET_PHYS_ADDR_SPACE_BITS 32
69 #endif
71 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
72 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
73 int nb_tbs;
74 /* any access to the tbs or the page table must use this lock */
75 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
77 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
78 uint8_t *code_gen_ptr;
80 int phys_ram_size;
81 int phys_ram_fd;
82 uint8_t *phys_ram_base;
83 uint8_t *phys_ram_dirty;
84 uint8_t *bios_mem;
86 CPUState *first_cpu;
87 /* current CPU in the current thread. It is only valid inside
88 cpu_exec() */
89 CPUState *cpu_single_env;
91 typedef struct PageDesc {
92 /* list of TBs intersecting this ram page */
93 TranslationBlock *first_tb;
 94     /* in order to optimize self-modifying code, we count the number of
 95        write accesses to a given page so we can switch to a bitmap */
96 unsigned int code_write_count;
97 uint8_t *code_bitmap;
98 #if defined(CONFIG_USER_ONLY)
99 unsigned long flags;
100 #endif
101 } PageDesc;
103 typedef struct PhysPageDesc {
104 /* offset in host memory of the page + io_index in the low 12 bits */
105 uint32_t phys_offset;
106 } PhysPageDesc;
108 #define L2_BITS 10
109 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
111 #define L1_SIZE (1 << L1_BITS)
112 #define L2_SIZE (1 << L2_BITS)
114 static void io_mem_init(void);
116 unsigned long qemu_real_host_page_size;
117 unsigned long qemu_host_page_bits;
118 unsigned long qemu_host_page_size;
119 unsigned long qemu_host_page_mask;
121 /* XXX: for system emulation, it could just be an array */
122 static PageDesc *l1_map[L1_SIZE];
123 PhysPageDesc **l1_phys_map;
125 /* io memory support */
126 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
127 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
128 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
129 static int io_mem_nb;
131 /* log support */
132 char *logfilename = "/tmp/qemu.log";
133 FILE *logfile;
134 int loglevel;
136 /* statistics */
137 static int tlb_flush_count;
138 static int tb_flush_count;
139 static int tb_phys_invalidate_count;
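/* Determine the host and target page sizes, make the code generation
   buffer executable, and allocate the physical page map. */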
141 static void page_init(void)
143 /* NOTE: we can always suppose that qemu_host_page_size >=
144 TARGET_PAGE_SIZE */
145 #ifdef _WIN32
147 SYSTEM_INFO system_info;
148 DWORD old_protect;
150 GetSystemInfo(&system_info);
151 qemu_real_host_page_size = system_info.dwPageSize;
153 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
154 PAGE_EXECUTE_READWRITE, &old_protect);
156 #else
157 qemu_real_host_page_size = getpagesize();
159 unsigned long start, end;
161 start = (unsigned long)code_gen_buffer;
162 start &= ~(qemu_real_host_page_size - 1);
164 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
165 end += qemu_real_host_page_size - 1;
166 end &= ~(qemu_real_host_page_size - 1);
168 mprotect((void *)start, end - start,
169 PROT_READ | PROT_WRITE | PROT_EXEC);
171 #endif
173 if (qemu_host_page_size == 0)
174 qemu_host_page_size = qemu_real_host_page_size;
175 if (qemu_host_page_size < TARGET_PAGE_SIZE)
176 qemu_host_page_size = TARGET_PAGE_SIZE;
177 qemu_host_page_bits = 0;
178 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
179 qemu_host_page_bits++;
180 qemu_host_page_mask = ~(qemu_host_page_size - 1);
181 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
182 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
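/* Return the PageDesc for a given target page index, allocating the
   second-level table on first use. */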
185 static inline PageDesc *page_find_alloc(unsigned int index)
187 PageDesc **lp, *p;
189 lp = &l1_map[index >> L2_BITS];
190 p = *lp;
191 if (!p) {
192 /* allocate if not found */
193 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
194 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
195 *lp = p;
197 return p + (index & (L2_SIZE - 1));
200 static inline PageDesc *page_find(unsigned int index)
202 PageDesc *p;
204 p = l1_map[index >> L2_BITS];
205 if (!p)
206 return 0;
207 return p + (index & (L2_SIZE - 1));
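/* Look up the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing levels of the physical page map are allocated and new entries
   start out as IO_MEM_UNASSIGNED. */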
210 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
212 void **lp, **p;
213 PhysPageDesc *pd;
215 p = (void **)l1_phys_map;
216 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
218 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
219 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
220 #endif
221 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
222 p = *lp;
223 if (!p) {
224 /* allocate if not found */
225 if (!alloc)
226 return NULL;
227 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
228 memset(p, 0, sizeof(void *) * L1_SIZE);
229 *lp = p;
231 #endif
232 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
233 pd = *lp;
234 if (!pd) {
235 int i;
236 /* allocate if not found */
237 if (!alloc)
238 return NULL;
239 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
240 *lp = pd;
241 for (i = 0; i < L2_SIZE; i++)
242 pd[i].phys_offset = IO_MEM_UNASSIGNED;
244 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
247 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
249 return phys_page_find_alloc(index, 0);
252 #if !defined(CONFIG_USER_ONLY)
253 static void tlb_protect_code(ram_addr_t ram_addr);
254 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
255 target_ulong vaddr);
256 #endif
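/* Register a new CPU: perform one-time init of the translator state
   (code buffer, page tables, io memory) on the first call, then link
   'env' at the end of the global CPU list and assign its cpu_index. */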
258 void cpu_exec_init(CPUState *env)
260 CPUState **penv;
261 int cpu_index;
263 if (!code_gen_ptr) {
264 code_gen_ptr = code_gen_buffer;
265 page_init();
266 io_mem_init();
268 env->next_cpu = NULL;
269 penv = &first_cpu;
270 cpu_index = 0;
271 while (*penv != NULL) {
272 penv = (CPUState **)&(*penv)->next_cpu;
273 cpu_index++;
275 env->cpu_index = cpu_index;
276 *penv = env;
279 static inline void invalidate_page_bitmap(PageDesc *p)
281 if (p->code_bitmap) {
282 qemu_free(p->code_bitmap);
283 p->code_bitmap = NULL;
285 p->code_write_count = 0;
288 /* set to NULL all the 'first_tb' fields in all PageDescs */
289 static void page_flush_tb(void)
291 int i, j;
292 PageDesc *p;
294 for(i = 0; i < L1_SIZE; i++) {
295 p = l1_map[i];
296 if (p) {
297 for(j = 0; j < L2_SIZE; j++) {
298 p->first_tb = NULL;
299 invalidate_page_bitmap(p);
300 p++;
306 /* flush all the translation blocks */
307 /* XXX: tb_flush is currently not thread safe */
308 void tb_flush(CPUState *env1)
310 CPUState *env;
311 #if defined(DEBUG_FLUSH)
312 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
313 code_gen_ptr - code_gen_buffer,
314 nb_tbs,
315 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
316 #endif
317 nb_tbs = 0;
319 for(env = first_cpu; env != NULL; env = env->next_cpu) {
320 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
323 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
324 page_flush_tb();
326 code_gen_ptr = code_gen_buffer;
327 /* XXX: flush processor icache at this point if cache flush is
328 expensive */
329 tb_flush_count++;
332 #ifdef DEBUG_TB_CHECK
334 static void tb_invalidate_check(unsigned long address)
336 TranslationBlock *tb;
337 int i;
338 address &= TARGET_PAGE_MASK;
339 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
340 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
341 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
342 address >= tb->pc + tb->size)) {
343 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
344 address, (long)tb->pc, tb->size);
350 /* verify that all the pages have correct rights for code */
351 static void tb_page_check(void)
353 TranslationBlock *tb;
354 int i, flags1, flags2;
356 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
357 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
358 flags1 = page_get_flags(tb->pc);
359 flags2 = page_get_flags(tb->pc + tb->size - 1);
360 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
361 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
362 (long)tb->pc, tb->size, flags1, flags2);
368 void tb_jmp_check(TranslationBlock *tb)
370 TranslationBlock *tb1;
371 unsigned int n1;
373 /* suppress any remaining jumps to this TB */
374 tb1 = tb->jmp_first;
375 for(;;) {
376 n1 = (long)tb1 & 3;
377 tb1 = (TranslationBlock *)((long)tb1 & ~3);
378 if (n1 == 2)
379 break;
380 tb1 = tb1->jmp_next[n1];
382 /* check end of list */
383 if (tb1 != tb) {
384 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
388 #endif
390 /* invalidate one TB */
391 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
392 int next_offset)
394 TranslationBlock *tb1;
395 for(;;) {
396 tb1 = *ptb;
397 if (tb1 == tb) {
398 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
399 break;
401 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
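/* Remove 'tb' from a page's TB list; the low 2 bits of each list
   pointer encode which of the TB's two page slots links it. */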
405 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
407 TranslationBlock *tb1;
408 unsigned int n1;
410 for(;;) {
411 tb1 = *ptb;
412 n1 = (long)tb1 & 3;
413 tb1 = (TranslationBlock *)((long)tb1 & ~3);
414 if (tb1 == tb) {
415 *ptb = tb1->page_next[n1];
416 break;
418 ptb = &tb1->page_next[n1];
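/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps kept by the TB it chains to (list entries carry the slot
   number in their low 2 bits; the head is tagged with 2). */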
422 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
424 TranslationBlock *tb1, **ptb;
425 unsigned int n1;
427 ptb = &tb->jmp_next[n];
428 tb1 = *ptb;
429 if (tb1) {
430 /* find tb(n) in circular list */
431 for(;;) {
432 tb1 = *ptb;
433 n1 = (long)tb1 & 3;
434 tb1 = (TranslationBlock *)((long)tb1 & ~3);
435 if (n1 == n && tb1 == tb)
436 break;
437 if (n1 == 2) {
438 ptb = &tb1->jmp_first;
439 } else {
440 ptb = &tb1->jmp_next[n1];
443 /* now we can suppress tb(n) from the list */
444 *ptb = tb->jmp_next[n];
446 tb->jmp_next[n] = NULL;
450 /* reset the jump entry 'n' of a TB so that it is not chained to
451 another TB */
452 static inline void tb_reset_jump(TranslationBlock *tb, int n)
454 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
457 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
459 CPUState *env;
460 PageDesc *p;
461 unsigned int h, n1;
462 target_ulong phys_pc;
463 TranslationBlock *tb1, *tb2;
465 /* remove the TB from the hash list */
466 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
467 h = tb_phys_hash_func(phys_pc);
468 tb_remove(&tb_phys_hash[h], tb,
469 offsetof(TranslationBlock, phys_hash_next));
471 /* remove the TB from the page list */
472 if (tb->page_addr[0] != page_addr) {
473 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
474 tb_page_remove(&p->first_tb, tb);
475 invalidate_page_bitmap(p);
477 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
478 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
479 tb_page_remove(&p->first_tb, tb);
480 invalidate_page_bitmap(p);
483 tb_invalidated_flag = 1;
485 /* remove the TB from the hash list */
486 h = tb_jmp_cache_hash_func(tb->pc);
487 for(env = first_cpu; env != NULL; env = env->next_cpu) {
488 if (env->tb_jmp_cache[h] == tb)
489 env->tb_jmp_cache[h] = NULL;
492 /* suppress this TB from the two jump lists */
493 tb_jmp_remove(tb, 0);
494 tb_jmp_remove(tb, 1);
496 /* suppress any remaining jumps to this TB */
497 tb1 = tb->jmp_first;
498 for(;;) {
499 n1 = (long)tb1 & 3;
500 if (n1 == 2)
501 break;
502 tb1 = (TranslationBlock *)((long)tb1 & ~3);
503 tb2 = tb1->jmp_next[n1];
504 tb_reset_jump(tb1, n1);
505 tb1->jmp_next[n1] = NULL;
506 tb1 = tb2;
508 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
510 tb_phys_invalidate_count++;
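/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */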
513 static inline void set_bits(uint8_t *tab, int start, int len)
515 int end, mask, end1;
517 end = start + len;
518 tab += start >> 3;
519 mask = 0xff << (start & 7);
520 if ((start & ~7) == (end & ~7)) {
521 if (start < end) {
522 mask &= ~(0xff << (end & 7));
523 *tab |= mask;
525 } else {
526 *tab++ |= mask;
527 start = (start + 8) & ~7;
528 end1 = end & ~7;
529 while (start < end1) {
530 *tab++ = 0xff;
531 start += 8;
533 if (start < end) {
534 mask = ~(0xff << (end & 7));
535 *tab |= mask;
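/* Build a bitmap of the bytes of this page that are covered by
   translated code, so that tb_invalidate_phys_page_fast() can skip
   writes that do not touch any TB. */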
540 static void build_page_bitmap(PageDesc *p)
542 int n, tb_start, tb_end;
543 TranslationBlock *tb;
545 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
546 if (!p->code_bitmap)
547 return;
548 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
550 tb = p->first_tb;
551 while (tb != NULL) {
552 n = (long)tb & 3;
553 tb = (TranslationBlock *)((long)tb & ~3);
554 /* NOTE: this is subtle as a TB may span two physical pages */
555 if (n == 0) {
556 /* NOTE: tb_end may be after the end of the page, but
557 it is not a problem */
558 tb_start = tb->pc & ~TARGET_PAGE_MASK;
559 tb_end = tb_start + tb->size;
560 if (tb_end > TARGET_PAGE_SIZE)
561 tb_end = TARGET_PAGE_SIZE;
562 } else {
563 tb_start = 0;
564 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
566 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
567 tb = tb->page_next[n];
571 #ifdef TARGET_HAS_PRECISE_SMC
573 static void tb_gen_code(CPUState *env,
574 target_ulong pc, target_ulong cs_base, int flags,
575 int cflags)
577 TranslationBlock *tb;
578 uint8_t *tc_ptr;
579 target_ulong phys_pc, phys_page2, virt_page2;
580 int code_gen_size;
582 phys_pc = get_phys_addr_code(env, pc);
583 tb = tb_alloc(pc);
584 if (!tb) {
585 /* flush must be done */
586 tb_flush(env);
587 /* cannot fail at this point */
588 tb = tb_alloc(pc);
590 tc_ptr = code_gen_ptr;
591 tb->tc_ptr = tc_ptr;
592 tb->cs_base = cs_base;
593 tb->flags = flags;
594 tb->cflags = cflags;
595 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
596 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
598 /* check next page if needed */
599 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
600 phys_page2 = -1;
601 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
602 phys_page2 = get_phys_addr_code(env, virt_page2);
604 tb_link_phys(tb, phys_pc, phys_page2);
606 #endif
608 /* invalidate all TBs which intersect with the target physical page
609    starting in range [start, end). NOTE: start and end must refer to
610 the same physical page. 'is_cpu_write_access' should be true if called
611 from a real cpu write access: the virtual CPU will exit the current
612 TB if code is modified inside this TB. */
613 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
614 int is_cpu_write_access)
616 int n, current_tb_modified, current_tb_not_found, current_flags;
617 CPUState *env = cpu_single_env;
618 PageDesc *p;
619 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
620 target_ulong tb_start, tb_end;
621 target_ulong current_pc, current_cs_base;
623 p = page_find(start >> TARGET_PAGE_BITS);
624 if (!p)
625 return;
626 if (!p->code_bitmap &&
627 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
628 is_cpu_write_access) {
629 /* build code bitmap */
630 build_page_bitmap(p);
633     /* we remove all the TBs in the range [start, end) */
634 /* XXX: see if in some cases it could be faster to invalidate all the code */
635 current_tb_not_found = is_cpu_write_access;
636 current_tb_modified = 0;
637 current_tb = NULL; /* avoid warning */
638 current_pc = 0; /* avoid warning */
639 current_cs_base = 0; /* avoid warning */
640 current_flags = 0; /* avoid warning */
641 tb = p->first_tb;
642 while (tb != NULL) {
643 n = (long)tb & 3;
644 tb = (TranslationBlock *)((long)tb & ~3);
645 tb_next = tb->page_next[n];
646 /* NOTE: this is subtle as a TB may span two physical pages */
647 if (n == 0) {
648 /* NOTE: tb_end may be after the end of the page, but
649 it is not a problem */
650 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
651 tb_end = tb_start + tb->size;
652 } else {
653 tb_start = tb->page_addr[1];
654 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
656 if (!(tb_end <= start || tb_start >= end)) {
657 #ifdef TARGET_HAS_PRECISE_SMC
658 if (current_tb_not_found) {
659 current_tb_not_found = 0;
660 current_tb = NULL;
661 if (env->mem_write_pc) {
662 /* now we have a real cpu fault */
663 current_tb = tb_find_pc(env->mem_write_pc);
666 if (current_tb == tb &&
667 !(current_tb->cflags & CF_SINGLE_INSN)) {
668 /* If we are modifying the current TB, we must stop
669 its execution. We could be more precise by checking
670 that the modification is after the current PC, but it
671 would require a specialized function to partially
672 restore the CPU state */
674 current_tb_modified = 1;
675 cpu_restore_state(current_tb, env,
676 env->mem_write_pc, NULL);
677 #if defined(TARGET_I386)
678 current_flags = env->hflags;
679 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
680 current_cs_base = (target_ulong)env->segs[R_CS].base;
681 current_pc = current_cs_base + env->eip;
682 #else
683 #error unsupported CPU
684 #endif
686 #endif /* TARGET_HAS_PRECISE_SMC */
687             /* we need to do this to handle the case where a signal
688 occurs while doing tb_phys_invalidate() */
689 saved_tb = NULL;
690 if (env) {
691 saved_tb = env->current_tb;
692 env->current_tb = NULL;
694 tb_phys_invalidate(tb, -1);
695 if (env) {
696 env->current_tb = saved_tb;
697 if (env->interrupt_request && env->current_tb)
698 cpu_interrupt(env, env->interrupt_request);
701 tb = tb_next;
703 #if !defined(CONFIG_USER_ONLY)
704 /* if no code remaining, no need to continue to use slow writes */
705 if (!p->first_tb) {
706 invalidate_page_bitmap(p);
707 if (is_cpu_write_access) {
708 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
711 #endif
712 #ifdef TARGET_HAS_PRECISE_SMC
713 if (current_tb_modified) {
714 /* we generate a block containing just the instruction
715 modifying the memory. It will ensure that it cannot modify
716 itself */
717 env->current_tb = NULL;
718 tb_gen_code(env, current_pc, current_cs_base, current_flags,
719 CF_SINGLE_INSN);
720 cpu_resume_from_signal(env, NULL);
722 #endif
725 /* len must be <= 8 and start must be a multiple of len */
726 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
728 PageDesc *p;
729 int offset, b;
730 #if 0
731 if (1) {
732 if (loglevel) {
733 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
734 cpu_single_env->mem_write_vaddr, len,
735 cpu_single_env->eip,
736 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
739 #endif
740 p = page_find(start >> TARGET_PAGE_BITS);
741 if (!p)
742 return;
743 if (p->code_bitmap) {
744 offset = start & ~TARGET_PAGE_MASK;
745 b = p->code_bitmap[offset >> 3] >> (offset & 7);
746 if (b & ((1 << len) - 1))
747 goto do_invalidate;
748 } else {
749 do_invalidate:
750 tb_invalidate_phys_page_range(start, start + len, 1);
754 #if !defined(CONFIG_SOFTMMU)
755 static void tb_invalidate_phys_page(target_ulong addr,
756 unsigned long pc, void *puc)
758 int n, current_flags, current_tb_modified;
759 target_ulong current_pc, current_cs_base;
760 PageDesc *p;
761 TranslationBlock *tb, *current_tb;
762 #ifdef TARGET_HAS_PRECISE_SMC
763 CPUState *env = cpu_single_env;
764 #endif
766 addr &= TARGET_PAGE_MASK;
767 p = page_find(addr >> TARGET_PAGE_BITS);
768 if (!p)
769 return;
770 tb = p->first_tb;
771 current_tb_modified = 0;
772 current_tb = NULL;
773 current_pc = 0; /* avoid warning */
774 current_cs_base = 0; /* avoid warning */
775 current_flags = 0; /* avoid warning */
776 #ifdef TARGET_HAS_PRECISE_SMC
777 if (tb && pc != 0) {
778 current_tb = tb_find_pc(pc);
780 #endif
781 while (tb != NULL) {
782 n = (long)tb & 3;
783 tb = (TranslationBlock *)((long)tb & ~3);
784 #ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb == tb &&
786 !(current_tb->cflags & CF_SINGLE_INSN)) {
787 /* If we are modifying the current TB, we must stop
788 its execution. We could be more precise by checking
789 that the modification is after the current PC, but it
790 would require a specialized function to partially
791 restore the CPU state */
793 current_tb_modified = 1;
794 cpu_restore_state(current_tb, env, pc, puc);
795 #if defined(TARGET_I386)
796 current_flags = env->hflags;
797 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
798 current_cs_base = (target_ulong)env->segs[R_CS].base;
799 current_pc = current_cs_base + env->eip;
800 #else
801 #error unsupported CPU
802 #endif
804 #endif /* TARGET_HAS_PRECISE_SMC */
805 tb_phys_invalidate(tb, addr);
806 tb = tb->page_next[n];
808 p->first_tb = NULL;
809 #ifdef TARGET_HAS_PRECISE_SMC
810 if (current_tb_modified) {
811 /* we generate a block containing just the instruction
812 modifying the memory. It will ensure that it cannot modify
813 itself */
814 env->current_tb = NULL;
815 tb_gen_code(env, current_pc, current_cs_base, current_flags,
816 CF_SINGLE_INSN);
817 cpu_resume_from_signal(env, puc);
819 #endif
821 #endif
823 /* add the tb in the target page and protect it if necessary */
824 static inline void tb_alloc_page(TranslationBlock *tb,
825 unsigned int n, target_ulong page_addr)
827 PageDesc *p;
828 TranslationBlock *last_first_tb;
830 tb->page_addr[n] = page_addr;
831 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
832 tb->page_next[n] = p->first_tb;
833 last_first_tb = p->first_tb;
834 p->first_tb = (TranslationBlock *)((long)tb | n);
835 invalidate_page_bitmap(p);
837 #if defined(TARGET_HAS_SMC) || 1
839 #if defined(CONFIG_USER_ONLY)
840 if (p->flags & PAGE_WRITE) {
841 target_ulong addr;
842 PageDesc *p2;
843 int prot;
845 /* force the host page as non writable (writes will have a
846 page fault + mprotect overhead) */
847 page_addr &= qemu_host_page_mask;
848 prot = 0;
849 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
850 addr += TARGET_PAGE_SIZE) {
852 p2 = page_find (addr >> TARGET_PAGE_BITS);
853 if (!p2)
854 continue;
855 prot |= p2->flags;
856 p2->flags &= ~PAGE_WRITE;
857 page_get_flags(addr);
859 mprotect(g2h(page_addr), qemu_host_page_size,
860 (prot & PAGE_BITS) & ~PAGE_WRITE);
861 #ifdef DEBUG_TB_INVALIDATE
862 printf("protecting code page: 0x%08lx\n",
863 page_addr);
864 #endif
866 #else
867 /* if some code is already present, then the pages are already
868 protected. So we handle the case where only the first TB is
869 allocated in a physical page */
870 if (!last_first_tb) {
871 tlb_protect_code(page_addr);
873 #endif
875 #endif /* TARGET_HAS_SMC */
878 /* Allocate a new translation block. Flush the translation buffer if
879 too many translation blocks or too much generated code. */
880 TranslationBlock *tb_alloc(target_ulong pc)
882 TranslationBlock *tb;
884 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
885 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
886 return NULL;
887 tb = &tbs[nb_tbs++];
888 tb->pc = pc;
889 tb->cflags = 0;
890 return tb;
893 /* add a new TB and link it to the physical page tables. phys_page2 is
894 (-1) to indicate that only one page contains the TB. */
895 void tb_link_phys(TranslationBlock *tb,
896 target_ulong phys_pc, target_ulong phys_page2)
898 unsigned int h;
899 TranslationBlock **ptb;
901 /* add in the physical hash table */
902 h = tb_phys_hash_func(phys_pc);
903 ptb = &tb_phys_hash[h];
904 tb->phys_hash_next = *ptb;
905 *ptb = tb;
907 /* add in the page list */
908 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
909 if (phys_page2 != -1)
910 tb_alloc_page(tb, 1, phys_page2);
911 else
912 tb->page_addr[1] = -1;
914 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
915 tb->jmp_next[0] = NULL;
916 tb->jmp_next[1] = NULL;
917 #ifdef USE_CODE_COPY
918 tb->cflags &= ~CF_FP_USED;
919 if (tb->cflags & CF_TB_FP_USED)
920 tb->cflags |= CF_FP_USED;
921 #endif
923 /* init original jump addresses */
924 if (tb->tb_next_offset[0] != 0xffff)
925 tb_reset_jump(tb, 0);
926 if (tb->tb_next_offset[1] != 0xffff)
927 tb_reset_jump(tb, 1);
929 #ifdef DEBUG_TB_CHECK
930 tb_page_check();
931 #endif
934 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
935 tb[1].tc_ptr. Return NULL if not found */
936 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
938 int m_min, m_max, m;
939 unsigned long v;
940 TranslationBlock *tb;
942 if (nb_tbs <= 0)
943 return NULL;
944 if (tc_ptr < (unsigned long)code_gen_buffer ||
945 tc_ptr >= (unsigned long)code_gen_ptr)
946 return NULL;
947 /* binary search (cf Knuth) */
948 m_min = 0;
949 m_max = nb_tbs - 1;
950 while (m_min <= m_max) {
951 m = (m_min + m_max) >> 1;
952 tb = &tbs[m];
953 v = (unsigned long)tb->tc_ptr;
954 if (v == tc_ptr)
955 return tb;
956 else if (tc_ptr < v) {
957 m_max = m - 1;
958 } else {
959 m_min = m + 1;
962 return &tbs[m_max];
965 static void tb_reset_jump_recursive(TranslationBlock *tb);
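/* Helper for tb_reset_jump_recursive(): if jump slot 'n' of 'tb' is
   chained, find the destination TB through the circular jump list,
   unlink 'tb' from it, reset the generated jump and recurse into the
   destination so that the whole chain is broken. */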
967 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
969 TranslationBlock *tb1, *tb_next, **ptb;
970 unsigned int n1;
972 tb1 = tb->jmp_next[n];
973 if (tb1 != NULL) {
974 /* find head of list */
975 for(;;) {
976 n1 = (long)tb1 & 3;
977 tb1 = (TranslationBlock *)((long)tb1 & ~3);
978 if (n1 == 2)
979 break;
980 tb1 = tb1->jmp_next[n1];
982         /* we are now sure that tb jumps to tb1 */
983 tb_next = tb1;
985 /* remove tb from the jmp_first list */
986 ptb = &tb_next->jmp_first;
987 for(;;) {
988 tb1 = *ptb;
989 n1 = (long)tb1 & 3;
990 tb1 = (TranslationBlock *)((long)tb1 & ~3);
991 if (n1 == n && tb1 == tb)
992 break;
993 ptb = &tb1->jmp_next[n1];
995 *ptb = tb->jmp_next[n];
996 tb->jmp_next[n] = NULL;
998 /* suppress the jump to next tb in generated code */
999 tb_reset_jump(tb, n);
1001 /* suppress jumps in the tb on which we could have jumped */
1002 tb_reset_jump_recursive(tb_next);
1006 static void tb_reset_jump_recursive(TranslationBlock *tb)
1008 tb_reset_jump_recursive2(tb, 0);
1009 tb_reset_jump_recursive2(tb, 1);
1012 #if defined(TARGET_HAS_ICE)
1013 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1015 target_ulong addr, pd;
1016 ram_addr_t ram_addr;
1017 PhysPageDesc *p;
1019 addr = cpu_get_phys_page_debug(env, pc);
1020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1021 if (!p) {
1022 pd = IO_MEM_UNASSIGNED;
1023 } else {
1024 pd = p->phys_offset;
1026 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1027 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1029 #endif
1031 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1032 breakpoint is reached */
1033 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1035 #if defined(TARGET_HAS_ICE)
1036 int i;
1038 for(i = 0; i < env->nb_breakpoints; i++) {
1039 if (env->breakpoints[i] == pc)
1040 return 0;
1043 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1044 return -1;
1045 env->breakpoints[env->nb_breakpoints++] = pc;
1047 #ifdef USE_KVM
1048 kvm_update_debugger(env);
1049 #endif
1051 breakpoint_invalidate(env, pc);
1052 return 0;
1053 #else
1054 return -1;
1055 #endif
1058 /* remove a breakpoint */
1059 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1061 #if defined(TARGET_HAS_ICE)
1062 int i;
1063 for(i = 0; i < env->nb_breakpoints; i++) {
1064 if (env->breakpoints[i] == pc)
1065 goto found;
1067 return -1;
1068 found:
1069 env->nb_breakpoints--;
1070 if (i < env->nb_breakpoints)
1071 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1073 #ifdef USE_KVM
1074 kvm_update_debugger(env);
1075 #endif
1077 breakpoint_invalidate(env, pc);
1078 return 0;
1079 #else
1080 return -1;
1081 #endif
1084 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1085 CPU loop after each instruction */
1086 void cpu_single_step(CPUState *env, int enabled)
1088 #if defined(TARGET_HAS_ICE)
1089 if (env->singlestep_enabled != enabled) {
1090 env->singlestep_enabled = enabled;
1091         /* must flush all the translated code to avoid inconsistencies */
1092 /* XXX: only flush what is necessary */
1093 tb_flush(env);
1095 #ifdef USE_KVM
1096 kvm_update_debugger(env);
1097 #endif
1098 #endif
1101 /* enable or disable low levels log */
1102 void cpu_set_log(int log_flags)
1104 loglevel = log_flags;
1105 if (loglevel && !logfile) {
1106 logfile = fopen(logfilename, "w");
1107 if (!logfile) {
1108 perror(logfilename);
1109 _exit(1);
1111 #if !defined(CONFIG_SOFTMMU)
1112         /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
1114 static uint8_t logfile_buf[4096];
1115 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1117 #else
1118 setvbuf(logfile, NULL, _IOLBF, 0);
1119 #endif
1123 void cpu_set_log_filename(const char *filename)
1125 logfilename = strdup(filename);
1128 /* mask must never be zero, except for the A20 change call */
1129 void cpu_interrupt(CPUState *env, int mask)
1131 TranslationBlock *tb;
1132 static int interrupt_lock;
1134 env->interrupt_request |= mask;
1135 /* if the cpu is currently executing code, we must unlink it and
1136 all the potentially executing TB */
1137 tb = env->current_tb;
1138 if (tb && !testandset(&interrupt_lock)) {
1139 env->current_tb = NULL;
1140 tb_reset_jump_recursive(tb);
1141 interrupt_lock = 0;
1145 void cpu_reset_interrupt(CPUState *env, int mask)
1147 env->interrupt_request &= ~mask;
1150 CPULogItem cpu_log_items[] = {
1151 { CPU_LOG_TB_OUT_ASM, "out_asm",
1152 "show generated host assembly code for each compiled TB" },
1153 { CPU_LOG_TB_IN_ASM, "in_asm",
1154 "show target assembly code for each compiled TB" },
1155 { CPU_LOG_TB_OP, "op",
1156 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1157 #ifdef TARGET_I386
1158 { CPU_LOG_TB_OP_OPT, "op_opt",
1159 "show micro ops after optimization for each compiled TB" },
1160 #endif
1161 { CPU_LOG_INT, "int",
1162 "show interrupts/exceptions in short format" },
1163 { CPU_LOG_EXEC, "exec",
1164 "show trace before each executed TB (lots of logs)" },
1165 { CPU_LOG_TB_CPU, "cpu",
1166 "show CPU state before bloc translation" },
1167 #ifdef TARGET_I386
1168 { CPU_LOG_PCALL, "pcall",
1169 "show protected mode far calls/returns/exceptions" },
1170 #endif
1171 #ifdef DEBUG_IOPORT
1172 { CPU_LOG_IOPORT, "ioport",
1173 "show all i/o ports accesses" },
1174 #endif
1175 { 0, NULL, NULL },
1178 static int cmp1(const char *s1, int n, const char *s2)
1180 if (strlen(s2) != n)
1181 return 0;
1182 return memcmp(s1, s2, n) == 0;
1185 /* takes a comma-separated list of log masks. Returns 0 on error. */
1186 int cpu_str_to_log_mask(const char *str)
1188 CPULogItem *item;
1189 int mask;
1190 const char *p, *p1;
1192 p = str;
1193 mask = 0;
1194 for(;;) {
1195 p1 = strchr(p, ',');
1196 if (!p1)
1197 p1 = p + strlen(p);
1198 if(cmp1(p,p1-p,"all")) {
1199 for(item = cpu_log_items; item->mask != 0; item++) {
1200 mask |= item->mask;
1202 } else {
1203 for(item = cpu_log_items; item->mask != 0; item++) {
1204 if (cmp1(p, p1 - p, item->name))
1205 goto found;
1207 return 0;
1209 found:
1210 mask |= item->mask;
1211 if (*p1 != ',')
1212 break;
1213 p = p1 + 1;
1215 return mask;
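/* For example (illustrative only), cpu_str_to_log_mask("in_asm,cpu")
   yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all" selects every
   entry of cpu_log_items. */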
1218 void cpu_abort(CPUState *env, const char *fmt, ...)
1220 va_list ap;
1222 va_start(ap, fmt);
1223 fprintf(stderr, "qemu: fatal: ");
1224 vfprintf(stderr, fmt, ap);
1225 fprintf(stderr, "\n");
1226 #ifdef TARGET_I386
1227 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1228 #else
1229 cpu_dump_state(env, stderr, fprintf, 0);
1230 #endif
1231 va_end(ap);
1232 abort();
1235 #if !defined(CONFIG_USER_ONLY)
1237 /* NOTE: if flush_global is true, also flush global entries (not
1238 implemented yet) */
1239 void tlb_flush(CPUState *env, int flush_global)
1241 int i;
1243 #if defined(DEBUG_TLB)
1244 printf("tlb_flush:\n");
1245 #endif
1246 /* must reset current TB so that interrupts cannot modify the
1247 links while we are modifying them */
1248 env->current_tb = NULL;
1250 for(i = 0; i < CPU_TLB_SIZE; i++) {
1251 env->tlb_table[0][i].addr_read = -1;
1252 env->tlb_table[0][i].addr_write = -1;
1253 env->tlb_table[0][i].addr_code = -1;
1254 env->tlb_table[1][i].addr_read = -1;
1255 env->tlb_table[1][i].addr_write = -1;
1256 env->tlb_table[1][i].addr_code = -1;
1259 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1261 #if !defined(CONFIG_SOFTMMU)
1262 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1263 #endif
1264 #ifdef USE_KQEMU
1265 if (env->kqemu_enabled) {
1266 kqemu_flush(env, flush_global);
1268 #endif
1269 tlb_flush_count++;
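/* Invalidate a single TLB entry if any of its read, write or code
   addresses matches the page-aligned 'addr'. */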
1272 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1274 if (addr == (tlb_entry->addr_read &
1275 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1276 addr == (tlb_entry->addr_write &
1277 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1278 addr == (tlb_entry->addr_code &
1279 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1280 tlb_entry->addr_read = -1;
1281 tlb_entry->addr_write = -1;
1282 tlb_entry->addr_code = -1;
1286 void tlb_flush_page(CPUState *env, target_ulong addr)
1288 int i;
1289 TranslationBlock *tb;
1291 #if defined(DEBUG_TLB)
1292 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1293 #endif
1294 /* must reset current TB so that interrupts cannot modify the
1295 links while we are modifying them */
1296 env->current_tb = NULL;
1298 addr &= TARGET_PAGE_MASK;
1299 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1300 tlb_flush_entry(&env->tlb_table[0][i], addr);
1301 tlb_flush_entry(&env->tlb_table[1][i], addr);
1303 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1304 tb = env->tb_jmp_cache[i];
1305 if (tb &&
1306 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1307 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1308 env->tb_jmp_cache[i] = NULL;
1312 #if !defined(CONFIG_SOFTMMU)
1313 if (addr < MMAP_AREA_END)
1314 munmap((void *)addr, TARGET_PAGE_SIZE);
1315 #endif
1316 #ifdef USE_KQEMU
1317 if (env->kqemu_enabled) {
1318 kqemu_flush_page(env, addr);
1320 #endif
1323 /* update the TLBs so that writes to code in the virtual page 'addr'
1324 can be detected */
1325 static void tlb_protect_code(ram_addr_t ram_addr)
1327 cpu_physical_memory_reset_dirty(ram_addr,
1328 ram_addr + TARGET_PAGE_SIZE,
1329 CODE_DIRTY_FLAG);
1332 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1333    tested for self-modifying code */
1334 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1335 target_ulong vaddr)
1337 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
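/* If this TLB entry maps RAM inside [start, start + length), redirect
   its writes through IO_MEM_NOTDIRTY so that the dirty bits are
   updated again on the next store. */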
1340 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1341 unsigned long start, unsigned long length)
1343 unsigned long addr;
1344 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1345 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1346 if ((addr - start) < length) {
1347 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1352 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1353 int dirty_flags)
1355 CPUState *env;
1356 unsigned long length, start1;
1357 int i, mask, len;
1358 uint8_t *p;
1360 start &= TARGET_PAGE_MASK;
1361 end = TARGET_PAGE_ALIGN(end);
1363 length = end - start;
1364 if (length == 0)
1365 return;
1366 len = length >> TARGET_PAGE_BITS;
1367 #ifdef USE_KQEMU
1368 /* XXX: should not depend on cpu context */
1369 env = first_cpu;
1370 if (env->kqemu_enabled) {
1371 ram_addr_t addr;
1372 addr = start;
1373 for(i = 0; i < len; i++) {
1374 kqemu_set_notdirty(env, addr);
1375 addr += TARGET_PAGE_SIZE;
1378 #endif
1379 mask = ~dirty_flags;
1380 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1381 for(i = 0; i < len; i++)
1382 p[i] &= mask;
1384 /* we modify the TLB cache so that the dirty bit will be set again
1385 when accessing the range */
1386 start1 = start + (unsigned long)phys_ram_base;
1387 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1388 for(i = 0; i < CPU_TLB_SIZE; i++)
1389 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1390 for(i = 0; i < CPU_TLB_SIZE; i++)
1391 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1394 #if !defined(CONFIG_SOFTMMU)
1395 /* XXX: this is expensive */
1397 VirtPageDesc *p;
1398 int j;
1399 target_ulong addr;
1401 for(i = 0; i < L1_SIZE; i++) {
1402 p = l1_virt_map[i];
1403 if (p) {
1404 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1405 for(j = 0; j < L2_SIZE; j++) {
1406 if (p->valid_tag == virt_valid_tag &&
1407 p->phys_addr >= start && p->phys_addr < end &&
1408 (p->prot & PROT_WRITE)) {
1409 if (addr < MMAP_AREA_END) {
1410 mprotect((void *)addr, TARGET_PAGE_SIZE,
1411 p->prot & ~PROT_WRITE);
1414 addr += TARGET_PAGE_SIZE;
1415 p++;
1420 #endif
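/* Re-check the dirty state of a RAM TLB entry and route its writes
   through IO_MEM_NOTDIRTY if the page is no longer marked dirty. */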
1423 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1425 ram_addr_t ram_addr;
1427 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1428 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1429 tlb_entry->addend - (unsigned long)phys_ram_base;
1430 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1431 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1436 /* update the TLB according to the current state of the dirty bits */
1437 void cpu_tlb_update_dirty(CPUState *env)
1439 int i;
1440 for(i = 0; i < CPU_TLB_SIZE; i++)
1441 tlb_update_dirty(&env->tlb_table[0][i]);
1442 for(i = 0; i < CPU_TLB_SIZE; i++)
1443 tlb_update_dirty(&env->tlb_table[1][i]);
1446 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1447 unsigned long start)
1449 unsigned long addr;
1450 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1451 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1452 if (addr == start) {
1453 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1458 /* update the TLB corresponding to virtual page vaddr and phys addr
1459 addr so that it is no longer dirty */
1460 static inline void tlb_set_dirty(CPUState *env,
1461 unsigned long addr, target_ulong vaddr)
1463 int i;
1465 addr &= TARGET_PAGE_MASK;
1466 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1467 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1468 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1471 /* add a new TLB entry. At most one entry for a given virtual address
1472 is permitted. Return 0 if OK or 2 if the page could not be mapped
1473 (can only happen in non SOFTMMU mode for I/O pages or pages
1474 conflicting with the host address space). */
1475 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1476 target_phys_addr_t paddr, int prot,
1477 int is_user, int is_softmmu)
1479 PhysPageDesc *p;
1480 unsigned long pd;
1481 unsigned int index;
1482 target_ulong address;
1483 target_phys_addr_t addend;
1484 int ret;
1485 CPUTLBEntry *te;
1487 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1488 if (!p) {
1489 pd = IO_MEM_UNASSIGNED;
1490 } else {
1491 pd = p->phys_offset;
1493 #if defined(DEBUG_TLB)
1494 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1495 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1496 #endif
1498 ret = 0;
1499 #if !defined(CONFIG_SOFTMMU)
1500 if (is_softmmu)
1501 #endif
1503 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1504 /* IO memory case */
1505 address = vaddr | pd;
1506 addend = paddr;
1507 } else {
1508 /* standard memory */
1509 address = vaddr;
1510 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1513 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1514 addend -= vaddr;
1515 te = &env->tlb_table[is_user][index];
1516 te->addend = addend;
1517 if (prot & PAGE_READ) {
1518 te->addr_read = address;
1519 } else {
1520 te->addr_read = -1;
1522 if (prot & PAGE_EXEC) {
1523 te->addr_code = address;
1524 } else {
1525 te->addr_code = -1;
1527 if (prot & PAGE_WRITE) {
1528 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1529 (pd & IO_MEM_ROMD)) {
1530 /* write access calls the I/O callback */
1531 te->addr_write = vaddr |
1532 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1533 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1534 !cpu_physical_memory_is_dirty(pd)) {
1535 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1536 } else {
1537 te->addr_write = address;
1539 } else {
1540 te->addr_write = -1;
1543 #if !defined(CONFIG_SOFTMMU)
1544 else {
1545 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1546 /* IO access: no mapping is done as it will be handled by the
1547 soft MMU */
1548 if (!(env->hflags & HF_SOFTMMU_MASK))
1549 ret = 2;
1550 } else {
1551 void *map_addr;
1553 if (vaddr >= MMAP_AREA_END) {
1554 ret = 2;
1555 } else {
1556 if (prot & PROT_WRITE) {
1557 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1558 #if defined(TARGET_HAS_SMC) || 1
1559 first_tb ||
1560 #endif
1561 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1562 !cpu_physical_memory_is_dirty(pd))) {
1563 /* ROM: we do as if code was inside */
1564 /* if code is present, we only map as read only and save the
1565 original mapping */
1566 VirtPageDesc *vp;
1568 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1569 vp->phys_addr = pd;
1570 vp->prot = prot;
1571 vp->valid_tag = virt_valid_tag;
1572 prot &= ~PAGE_WRITE;
1575 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1576 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1577 if (map_addr == MAP_FAILED) {
1578                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1579 paddr, vaddr);
1584 #endif
1585 return ret;
1588 /* called from signal handler: invalidate the code and unprotect the
1589    page. Return TRUE if the fault was successfully handled. */
1590 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1592 #if !defined(CONFIG_SOFTMMU)
1593 VirtPageDesc *vp;
1595 #if defined(DEBUG_TLB)
1596 printf("page_unprotect: addr=0x%08x\n", addr);
1597 #endif
1598 addr &= TARGET_PAGE_MASK;
1600 /* if it is not mapped, no need to worry here */
1601 if (addr >= MMAP_AREA_END)
1602 return 0;
1603 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1604 if (!vp)
1605 return 0;
1606     /* NOTE: in this case, valid_tag is _not_ tested as it
1607 validates only the code TLB */
1608 if (vp->valid_tag != virt_valid_tag)
1609 return 0;
1610 if (!(vp->prot & PAGE_WRITE))
1611 return 0;
1612 #if defined(DEBUG_TLB)
1613 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1614 addr, vp->phys_addr, vp->prot);
1615 #endif
1616 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1617 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1618 (unsigned long)addr, vp->prot);
1619 /* set the dirty bit */
1620 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1621 /* flush the code inside */
1622 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1623 return 1;
1624 #else
1625 return 0;
1626 #endif
1629 #else
1631 void tlb_flush(CPUState *env, int flush_global)
1635 void tlb_flush_page(CPUState *env, target_ulong addr)
1639 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1640 target_phys_addr_t paddr, int prot,
1641 int is_user, int is_softmmu)
1643 return 0;
1646 /* dump memory mappings */
1647 void page_dump(FILE *f)
1649 unsigned long start, end;
1650 int i, j, prot, prot1;
1651 PageDesc *p;
1653 fprintf(f, "%-8s %-8s %-8s %s\n",
1654 "start", "end", "size", "prot");
1655 start = -1;
1656 end = -1;
1657 prot = 0;
1658 for(i = 0; i <= L1_SIZE; i++) {
1659 if (i < L1_SIZE)
1660 p = l1_map[i];
1661 else
1662 p = NULL;
1663 for(j = 0;j < L2_SIZE; j++) {
1664 if (!p)
1665 prot1 = 0;
1666 else
1667 prot1 = p[j].flags;
1668 if (prot1 != prot) {
1669 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1670 if (start != -1) {
1671 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1672 start, end, end - start,
1673 prot & PAGE_READ ? 'r' : '-',
1674 prot & PAGE_WRITE ? 'w' : '-',
1675 prot & PAGE_EXEC ? 'x' : '-');
1677 if (prot1 != 0)
1678 start = end;
1679 else
1680 start = -1;
1681 prot = prot1;
1683 if (!p)
1684 break;
1689 int page_get_flags(target_ulong address)
1691 PageDesc *p;
1693 p = page_find(address >> TARGET_PAGE_BITS);
1694 if (!p)
1695 return 0;
1696 return p->flags;
1699 /* modify the flags of a page and invalidate the code if
1700    necessary. The flag PAGE_WRITE_ORG is set automatically
1701 depending on PAGE_WRITE */
1702 void page_set_flags(target_ulong start, target_ulong end, int flags)
1704 PageDesc *p;
1705 target_ulong addr;
1707 start = start & TARGET_PAGE_MASK;
1708 end = TARGET_PAGE_ALIGN(end);
1709 if (flags & PAGE_WRITE)
1710 flags |= PAGE_WRITE_ORG;
1711 spin_lock(&tb_lock);
1712 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1713 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1714 /* if the write protection is set, then we invalidate the code
1715 inside */
1716 if (!(p->flags & PAGE_WRITE) &&
1717 (flags & PAGE_WRITE) &&
1718 p->first_tb) {
1719 tb_invalidate_phys_page(addr, 0, NULL);
1721 p->flags = flags;
1723 spin_unlock(&tb_lock);
1726 /* called from signal handler: invalidate the code and unprotect the
1727    page. Return TRUE if the fault was successfully handled. */
1728 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1730 unsigned int page_index, prot, pindex;
1731 PageDesc *p, *p1;
1732 target_ulong host_start, host_end, addr;
1734 host_start = address & qemu_host_page_mask;
1735 page_index = host_start >> TARGET_PAGE_BITS;
1736 p1 = page_find(page_index);
1737 if (!p1)
1738 return 0;
1739 host_end = host_start + qemu_host_page_size;
1740 p = p1;
1741 prot = 0;
1742 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1743 prot |= p->flags;
1744 p++;
1746 /* if the page was really writable, then we change its
1747 protection back to writable */
1748 if (prot & PAGE_WRITE_ORG) {
1749 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1750 if (!(p1[pindex].flags & PAGE_WRITE)) {
1751 mprotect((void *)g2h(host_start), qemu_host_page_size,
1752 (prot & PAGE_BITS) | PAGE_WRITE);
1753 p1[pindex].flags |= PAGE_WRITE;
1754 /* and since the content will be modified, we must invalidate
1755 the corresponding translated code. */
1756 tb_invalidate_phys_page(address, pc, puc);
1757 #ifdef DEBUG_TB_CHECK
1758 tb_invalidate_check(address);
1759 #endif
1760 return 1;
1763 return 0;
1766 /* call this function when system calls directly modify a memory area */
1767 /* ??? This should be redundant now that we have lock_user. */
1768 void page_unprotect_range(target_ulong data, target_ulong data_size)
1770 target_ulong start, end, addr;
1772 start = data;
1773 end = start + data_size;
1774 start &= TARGET_PAGE_MASK;
1775 end = TARGET_PAGE_ALIGN(end);
1776 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1777 page_unprotect(addr, 0, NULL);
1781 static inline void tlb_set_dirty(CPUState *env,
1782 unsigned long addr, target_ulong vaddr)
1785 #endif /* defined(CONFIG_USER_ONLY) */
1787 /* register physical memory. 'size' must be a multiple of the target
1788 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1789 io memory page */
1790 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1791 unsigned long size,
1792 unsigned long phys_offset)
1794 target_phys_addr_t addr, end_addr;
1795 PhysPageDesc *p;
1796 CPUState *env;
1798 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1799 end_addr = start_addr + size;
1800 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1801 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1802 p->phys_offset = phys_offset;
1803 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1804 (phys_offset & IO_MEM_ROMD))
1805 phys_offset += TARGET_PAGE_SIZE;
1808 /* since each CPU stores ram addresses in its TLB cache, we must
1809 reset the modified entries */
1810 /* XXX: slow ! */
1811 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1812 tlb_flush(env, 1);
1816 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1818 return 0;
1821 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1825 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1826 unassigned_mem_readb,
1827 unassigned_mem_readb,
1828 unassigned_mem_readb,
1831 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1832 unassigned_mem_writeb,
1833 unassigned_mem_writeb,
1834 unassigned_mem_writeb,
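/* notdirty_mem_write*: slow-path store handlers installed for RAM
   pages that still contain translated code or are not yet marked
   dirty; they invalidate the affected TBs, perform the store, update
   the dirty bitmap and drop the callback once the page is fully dirty. */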
1837 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1839 unsigned long ram_addr;
1840 int dirty_flags;
1841 ram_addr = addr - (unsigned long)phys_ram_base;
1842 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1843 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1844 #if !defined(CONFIG_USER_ONLY)
1845 tb_invalidate_phys_page_fast(ram_addr, 1);
1846 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1847 #endif
1849 stb_p((uint8_t *)(long)addr, val);
1850 #ifdef USE_KQEMU
1851 if (cpu_single_env->kqemu_enabled &&
1852 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1853 kqemu_modify_page(cpu_single_env, ram_addr);
1854 #endif
1855 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1856 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1857 /* we remove the notdirty callback only if the code has been
1858 flushed */
1859 if (dirty_flags == 0xff)
1860 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1863 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1865 unsigned long ram_addr;
1866 int dirty_flags;
1867 ram_addr = addr - (unsigned long)phys_ram_base;
1868 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1869 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1870 #if !defined(CONFIG_USER_ONLY)
1871 tb_invalidate_phys_page_fast(ram_addr, 2);
1872 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1873 #endif
1875 stw_p((uint8_t *)(long)addr, val);
1876 #ifdef USE_KQEMU
1877 if (cpu_single_env->kqemu_enabled &&
1878 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1879 kqemu_modify_page(cpu_single_env, ram_addr);
1880 #endif
1881 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1882 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1883 /* we remove the notdirty callback only if the code has been
1884 flushed */
1885 if (dirty_flags == 0xff)
1886 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1889 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1891 unsigned long ram_addr;
1892 int dirty_flags;
1893 ram_addr = addr - (unsigned long)phys_ram_base;
1894 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1895 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1896 #if !defined(CONFIG_USER_ONLY)
1897 tb_invalidate_phys_page_fast(ram_addr, 4);
1898 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1899 #endif
1901 stl_p((uint8_t *)(long)addr, val);
1902 #ifdef USE_KQEMU
1903 if (cpu_single_env->kqemu_enabled &&
1904 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1905 kqemu_modify_page(cpu_single_env, ram_addr);
1906 #endif
1907 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1908 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1909 /* we remove the notdirty callback only if the code has been
1910 flushed */
1911 if (dirty_flags == 0xff)
1912 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1915 static CPUReadMemoryFunc *error_mem_read[3] = {
1916 NULL, /* never used */
1917 NULL, /* never used */
1918 NULL, /* never used */
1921 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1922 notdirty_mem_writeb,
1923 notdirty_mem_writew,
1924 notdirty_mem_writel,
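/* Register the built-in ROM, unassigned and not-dirty io memory
   handlers and allocate the physical RAM dirty bitmap. */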
1927 static void io_mem_init(void)
1929 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1930 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1931 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1932 io_mem_nb = 5;
1934 /* alloc dirty bits array */
1935 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1936 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1939 /* mem_read and mem_write are arrays of functions used to access
1940    bytes (index 0), words (index 1) and dwords (index 2). All
1941    functions must be supplied. If io_index is non-zero, the
1942    corresponding io zone is modified. If it is zero, a new io zone is
1943    allocated. The return value can be used with
1944    cpu_register_physical_memory(). (-1) is returned on error. */
1945 int cpu_register_io_memory(int io_index,
1946 CPUReadMemoryFunc **mem_read,
1947 CPUWriteMemoryFunc **mem_write,
1948 void *opaque)
1950 int i;
1952 if (io_index <= 0) {
1953 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1954 return -1;
1955 io_index = io_mem_nb++;
1956 } else {
1957 if (io_index >= IO_MEM_NB_ENTRIES)
1958 return -1;
1961 for(i = 0;i < 3; i++) {
1962 io_mem_read[io_index][i] = mem_read[i];
1963 io_mem_write[io_index][i] = mem_write[i];
1965 io_mem_opaque[io_index] = opaque;
1966 return io_index << IO_MEM_SHIFT;
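/* Illustrative usage (not part of the original file): a device model
   would typically register its callbacks and then map them, e.g.

       io = cpu_register_io_memory(0, my_mem_read, my_mem_write, s);
       cpu_register_physical_memory(0xf2000000, 0x1000, io);

   where my_mem_read/my_mem_write are hypothetical arrays of three
   handlers (byte, word, dword) and 0xf2000000 is an arbitrary example
   address. */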
1969 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1971 return io_mem_write[io_index >> IO_MEM_SHIFT];
1974 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1976 return io_mem_read[io_index >> IO_MEM_SHIFT];
1979 /* physical memory access (slow version, mainly for debug) */
1980 #if defined(CONFIG_USER_ONLY)
1981 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1982 int len, int is_write)
1984 int l, flags;
1985 target_ulong page;
1986 void * p;
1988 while (len > 0) {
1989 page = addr & TARGET_PAGE_MASK;
1990 l = (page + TARGET_PAGE_SIZE) - addr;
1991 if (l > len)
1992 l = len;
1993 flags = page_get_flags(page);
1994 if (!(flags & PAGE_VALID))
1995 return;
1996 if (is_write) {
1997 if (!(flags & PAGE_WRITE))
1998 return;
1999 p = lock_user(addr, len, 0);
2000 memcpy(p, buf, len);
2001 unlock_user(p, addr, len);
2002 } else {
2003 if (!(flags & PAGE_READ))
2004 return;
2005 p = lock_user(addr, len, 1);
2006 memcpy(buf, p, len);
2007 unlock_user(p, addr, 0);
2009 len -= l;
2010 buf += l;
2011 addr += l;
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

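/* Illustrative usage sketch (not part of the original file): a device
   model or loader performing a DMA-style copy into guest RAM through
   the helper above.  The data pattern and the 0x100000 guest-physical
   address are hypothetical. */
#if 0
static void demo_dma_roundtrip(void)
{
    uint8_t data[16];

    memset(data, 0xab, sizeof(data));
    /* is_write = 1: write.  The helper splits the access at page
       boundaries and routes I/O pages to the registered handlers. */
    cpu_physical_memory_rw(0x100000, data, sizeof(data), 1);

    /* is_write = 0: read the bytes back. */
    cpu_physical_memory_rw(0x100000, data, sizeof(data), 0);
}
#endif
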
/* used for ROM loading: can write both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

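/* Illustrative usage sketch (not part of the original file): board
   code copying a firmware image into a ROM region at startup.  The
   bios_buf/bios_size parameters and the 0xfffe0000 load address are
   hypothetical.  Unlike cpu_physical_memory_rw(), this helper also
   writes into pages registered as IO_MEM_ROM or marked IO_MEM_ROMD. */
#if 0
static void demo_load_firmware(const uint8_t *bios_buf, int bios_size)
{
    cpu_physical_memory_write_rom(0xfffe0000, bios_buf, bios_size);
}
#endif
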
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

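/* Illustrative sketch (not part of the original file) of why the
   _notdirty variant exists: a target MMU helper that sets a flag
   inside a guest page table entry should not have that store set the
   RAM dirty bit, since the dirty bitmap may be tracking exactly those
   PTE pages.  The pte_addr parameter and the 0x20 "accessed" bit are
   hypothetical. */
#if 0
static void demo_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                       /* hypothetical accessed bit */
    stl_phys_notdirty(pte_addr, pte);  /* no dirty-bit or TB side effects */
}
#endif
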
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

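/* Illustrative usage sketch (not part of the original file): the
   fixed-width helpers above give device and board code word-at-a-time
   access to guest physical memory without an explicit buffer.  The
   0x1000 address is hypothetical; the 32-bit and 64-bit variants
   require naturally aligned addresses. */
#if 0
static void demo_word_access(void)
{
    uint32_t v;

    v = ldl_phys(0x1000);        /* 32 bit read of guest memory */
    stl_phys(0x1000, v | 1);     /* 32 bit write; updates dirty bits and
                                    invalidates any TBs in that page */
    stb_phys(0x1004, 0xff);      /* byte write via the slow path */
}
#endif
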
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

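/* Illustrative usage sketch (not part of the original file): this is
   the path a debugger front end such as the gdb stub takes to access
   guest *virtual* memory -- each page is first translated with
   cpu_get_phys_page_debug() and then handed to
   cpu_physical_memory_rw().  The helper name and parameters here are
   hypothetical. */
#if 0
static int demo_peek_guest_vaddr(CPUState *env, target_ulong vaddr,
                                 uint8_t *out, int len)
{
    /* is_write = 0: read.  Returns -1 if any page is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif
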
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

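/* Illustrative usage sketch (not part of the original file): dumping
   the translation statistics to a plain stdio stream.  In QEMU this is
   typically driven from a monitor command, but any fprintf-compatible
   callback works. */
#if 0
static void demo_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
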
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif