[qemu/qemu_0_9_1_stable.git] / exec.c
blob 084b84409b267cbde8566b881615b242f23ececb
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
52 #if !defined(CONFIG_USER_ONLY)
53 /* TB consistency checks only implemented for usermode emulation. */
54 #undef DEBUG_TB_CHECK
55 #endif
57 /* threshold to flush the translated code buffer */
58 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60 #define SMC_BITMAP_USE_THRESHOLD 10
62 #define MMAP_AREA_START 0x00000000
63 #define MMAP_AREA_END 0xa8000000
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_ALPHA)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 42
69 #define TARGET_VIRT_ADDR_SPACE_BITS 42
70 #elif defined(TARGET_PPC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #else
73 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
74 #define TARGET_PHYS_ADDR_SPACE_BITS 32
75 #endif
77 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
78 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
79 int nb_tbs;
80 /* any access to the tbs or the page table must use this lock */
81 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
83 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
84 uint8_t *code_gen_ptr;
86 int phys_ram_size;
87 int phys_ram_fd;
88 uint8_t *phys_ram_base;
89 uint8_t *phys_ram_dirty;
90 static ram_addr_t phys_ram_alloc_offset = 0;
92 CPUState *first_cpu;
93 /* current CPU in the current thread. It is only valid inside
94 cpu_exec() */
95 CPUState *cpu_single_env;
97 typedef struct PageDesc {
98 /* list of TBs intersecting this ram page */
99 TranslationBlock *first_tb;
100 /* in order to optimize self modifying code, we count the number
101 of lookups we do to a given page to use a bitmap */
102 unsigned int code_write_count;
103 uint8_t *code_bitmap;
104 #if defined(CONFIG_USER_ONLY)
105 unsigned long flags;
106 #endif
107 } PageDesc;
109 typedef struct PhysPageDesc {
110 /* offset in host memory of the page + io_index in the low 12 bits */
111 uint32_t phys_offset;
112 } PhysPageDesc;
114 #define L2_BITS 10
115 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
116 /* XXX: this is a temporary hack for alpha target.
117 * In the future, this is to be replaced by a multi-level table
118  * to actually be able to handle the complete 64-bit address space.
119  */
120 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
121 #else
122 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
123 #endif
125 #define L1_SIZE (1 << L1_BITS)
126 #define L2_SIZE (1 << L2_BITS)
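/* Illustrative worked example (not part of the original source): with the
   usual TARGET_PAGE_BITS = 12 (4 KiB target pages) and L2_BITS = 10, the
   32-bit case gives L1_BITS = 32 - 10 - 12 = 10, so L1_SIZE == L2_SIZE ==
   1024 and a virtual page number vpn = addr >> TARGET_PAGE_BITS is split as
   l1_map[vpn >> L2_BITS] to pick a PageDesc array and then
   [vpn & (L2_SIZE - 1)] to pick the PageDesc itself, which is exactly the
   arithmetic used by page_find()/page_find_alloc() below. */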
128 static void io_mem_init(void);
130 unsigned long qemu_real_host_page_size;
131 unsigned long qemu_host_page_bits;
132 unsigned long qemu_host_page_size;
133 unsigned long qemu_host_page_mask;
135 /* XXX: for system emulation, it could just be an array */
136 static PageDesc *l1_map[L1_SIZE];
137 PhysPageDesc **l1_phys_map;
139 /* io memory support */
140 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
141 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
142 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
143 static int io_mem_nb;
144 #if defined(CONFIG_SOFTMMU)
145 static int io_mem_watch;
146 #endif
148 /* log support */
149 char *logfilename = "/tmp/qemu.log";
150 FILE *logfile;
151 int loglevel;
153 /* statistics */
154 static int tlb_flush_count;
155 static int tb_flush_count;
156 static int tb_phys_invalidate_count;
158 static void page_init(void)
160 /* NOTE: we can always suppose that qemu_host_page_size >=
161 TARGET_PAGE_SIZE */
162 #ifdef _WIN32
164 SYSTEM_INFO system_info;
165 DWORD old_protect;
167 GetSystemInfo(&system_info);
168 qemu_real_host_page_size = system_info.dwPageSize;
170 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
171 PAGE_EXECUTE_READWRITE, &old_protect);
173 #else
174 qemu_real_host_page_size = getpagesize();
176 unsigned long start, end;
178 start = (unsigned long)code_gen_buffer;
179 start &= ~(qemu_real_host_page_size - 1);
181 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
182 end += qemu_real_host_page_size - 1;
183 end &= ~(qemu_real_host_page_size - 1);
185 mprotect((void *)start, end - start,
186 PROT_READ | PROT_WRITE | PROT_EXEC);
188 #endif
190 if (qemu_host_page_size == 0)
191 qemu_host_page_size = qemu_real_host_page_size;
192 if (qemu_host_page_size < TARGET_PAGE_SIZE)
193 qemu_host_page_size = TARGET_PAGE_SIZE;
194 qemu_host_page_bits = 0;
195 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
196 qemu_host_page_bits++;
197 qemu_host_page_mask = ~(qemu_host_page_size - 1);
198 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
199 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
202 static inline PageDesc *page_find_alloc(unsigned int index)
204 PageDesc **lp, *p;
206 lp = &l1_map[index >> L2_BITS];
207 p = *lp;
208 if (!p) {
209 /* allocate if not found */
210 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
211 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
212 *lp = p;
214 return p + (index & (L2_SIZE - 1));
217 static inline PageDesc *page_find(unsigned int index)
219 PageDesc *p;
221 p = l1_map[index >> L2_BITS];
222 if (!p)
223 return 0;
224 return p + (index & (L2_SIZE - 1));
227 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
229 void **lp, **p;
230 PhysPageDesc *pd;
232 p = (void **)l1_phys_map;
233 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
235 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
237 #endif
238 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
239 p = *lp;
240 if (!p) {
241 /* allocate if not found */
242 if (!alloc)
243 return NULL;
244 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
245 memset(p, 0, sizeof(void *) * L1_SIZE);
246 *lp = p;
248 #endif
249 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
250 pd = *lp;
251 if (!pd) {
252 int i;
253 /* allocate if not found */
254 if (!alloc)
255 return NULL;
256 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
257 *lp = pd;
258 for (i = 0; i < L2_SIZE; i++)
259 pd[i].phys_offset = IO_MEM_UNASSIGNED;
261 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
264 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
266 return phys_page_find_alloc(index, 0);
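/* Minimal usage sketch (illustrative, not part of the original source):
   callers below look up a physical page descriptor and treat a missing
   entry as unassigned memory; the helper name here is hypothetical. */
#if 0
static uint32_t example_phys_offset(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : IO_MEM_UNASSIGNED;
}
#endif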
269 #if !defined(CONFIG_USER_ONLY)
270 static void tlb_protect_code(ram_addr_t ram_addr);
271 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
272 target_ulong vaddr);
273 #endif
275 void cpu_exec_init(CPUState *env)
277 CPUState **penv;
278 int cpu_index;
280 if (!code_gen_ptr) {
281 code_gen_ptr = code_gen_buffer;
282 page_init();
283 io_mem_init();
285 env->next_cpu = NULL;
286 penv = &first_cpu;
287 cpu_index = 0;
288 while (*penv != NULL) {
289 penv = (CPUState **)&(*penv)->next_cpu;
290 cpu_index++;
292 env->cpu_index = cpu_index;
293 env->nb_watchpoints = 0;
294 *penv = env;
297 static inline void invalidate_page_bitmap(PageDesc *p)
299 if (p->code_bitmap) {
300 qemu_free(p->code_bitmap);
301 p->code_bitmap = NULL;
303 p->code_write_count = 0;
306 /* set to NULL all the 'first_tb' fields in all PageDescs */
307 static void page_flush_tb(void)
309 int i, j;
310 PageDesc *p;
312 for(i = 0; i < L1_SIZE; i++) {
313 p = l1_map[i];
314 if (p) {
315 for(j = 0; j < L2_SIZE; j++) {
316 p->first_tb = NULL;
317 invalidate_page_bitmap(p);
318 p++;
324 /* flush all the translation blocks */
325 /* XXX: tb_flush is currently not thread safe */
326 void tb_flush(CPUState *env1)
328 CPUState *env;
329 #if defined(DEBUG_FLUSH)
330 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
331 code_gen_ptr - code_gen_buffer,
332 nb_tbs,
333 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
334 #endif
335 nb_tbs = 0;
337 for(env = first_cpu; env != NULL; env = env->next_cpu) {
338 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
341 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
342 page_flush_tb();
344 code_gen_ptr = code_gen_buffer;
345 /* XXX: flush processor icache at this point if cache flush is
346 expensive */
347 tb_flush_count++;
350 #ifdef DEBUG_TB_CHECK
352 static void tb_invalidate_check(target_ulong address)
354 TranslationBlock *tb;
355 int i;
356 address &= TARGET_PAGE_MASK;
357 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
359 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
360 address >= tb->pc + tb->size)) {
361 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
362 address, (long)tb->pc, tb->size);
368 /* verify that all the pages have correct rights for code */
369 static void tb_page_check(void)
371 TranslationBlock *tb;
372 int i, flags1, flags2;
374 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
375 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
376 flags1 = page_get_flags(tb->pc);
377 flags2 = page_get_flags(tb->pc + tb->size - 1);
378 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
379 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
380 (long)tb->pc, tb->size, flags1, flags2);
386 void tb_jmp_check(TranslationBlock *tb)
388 TranslationBlock *tb1;
389 unsigned int n1;
391 /* suppress any remaining jumps to this TB */
392 tb1 = tb->jmp_first;
393 for(;;) {
394 n1 = (long)tb1 & 3;
395 tb1 = (TranslationBlock *)((long)tb1 & ~3);
396 if (n1 == 2)
397 break;
398 tb1 = tb1->jmp_next[n1];
400 /* check end of list */
401 if (tb1 != tb) {
402 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
406 #endif
408 /* invalidate one TB */
409 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
410 int next_offset)
412 TranslationBlock *tb1;
413 for(;;) {
414 tb1 = *ptb;
415 if (tb1 == tb) {
416 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
417 break;
419 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
423 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
425 TranslationBlock *tb1;
426 unsigned int n1;
428 for(;;) {
429 tb1 = *ptb;
430 n1 = (long)tb1 & 3;
431 tb1 = (TranslationBlock *)((long)tb1 & ~3);
432 if (tb1 == tb) {
433 *ptb = tb1->page_next[n1];
434 break;
436 ptb = &tb1->page_next[n1];
440 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
442 TranslationBlock *tb1, **ptb;
443 unsigned int n1;
445 ptb = &tb->jmp_next[n];
446 tb1 = *ptb;
447 if (tb1) {
448 /* find tb(n) in circular list */
449 for(;;) {
450 tb1 = *ptb;
451 n1 = (long)tb1 & 3;
452 tb1 = (TranslationBlock *)((long)tb1 & ~3);
453 if (n1 == n && tb1 == tb)
454 break;
455 if (n1 == 2) {
456 ptb = &tb1->jmp_first;
457 } else {
458 ptb = &tb1->jmp_next[n1];
461 /* now we can suppress tb(n) from the list */
462 *ptb = tb->jmp_next[n];
464 tb->jmp_next[n] = NULL;
468 /* reset the jump entry 'n' of a TB so that it is not chained to
469 another TB */
470 static inline void tb_reset_jump(TranslationBlock *tb, int n)
472 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
475 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
477 CPUState *env;
478 PageDesc *p;
479 unsigned int h, n1;
480 target_ulong phys_pc;
481 TranslationBlock *tb1, *tb2;
483 /* remove the TB from the hash list */
484 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
485 h = tb_phys_hash_func(phys_pc);
486 tb_remove(&tb_phys_hash[h], tb,
487 offsetof(TranslationBlock, phys_hash_next));
489 /* remove the TB from the page list */
490 if (tb->page_addr[0] != page_addr) {
491 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
492 tb_page_remove(&p->first_tb, tb);
493 invalidate_page_bitmap(p);
495 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
496 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
497 tb_page_remove(&p->first_tb, tb);
498 invalidate_page_bitmap(p);
501 tb_invalidated_flag = 1;
503 /* remove the TB from the hash list */
504 h = tb_jmp_cache_hash_func(tb->pc);
505 for(env = first_cpu; env != NULL; env = env->next_cpu) {
506 if (env->tb_jmp_cache[h] == tb)
507 env->tb_jmp_cache[h] = NULL;
510 /* suppress this TB from the two jump lists */
511 tb_jmp_remove(tb, 0);
512 tb_jmp_remove(tb, 1);
514 /* suppress any remaining jumps to this TB */
515 tb1 = tb->jmp_first;
516 for(;;) {
517 n1 = (long)tb1 & 3;
518 if (n1 == 2)
519 break;
520 tb1 = (TranslationBlock *)((long)tb1 & ~3);
521 tb2 = tb1->jmp_next[n1];
522 tb_reset_jump(tb1, n1);
523 tb1->jmp_next[n1] = NULL;
524 tb1 = tb2;
526 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
528 tb_phys_invalidate_count++;
531 static inline void set_bits(uint8_t *tab, int start, int len)
533 int end, mask, end1;
535 end = start + len;
536 tab += start >> 3;
537 mask = 0xff << (start & 7);
538 if ((start & ~7) == (end & ~7)) {
539 if (start < end) {
540 mask &= ~(0xff << (end & 7));
541 *tab |= mask;
543 } else {
544 *tab++ |= mask;
545 start = (start + 8) & ~7;
546 end1 = end & ~7;
547 while (start < end1) {
548 *tab++ = 0xff;
549 start += 8;
551 if (start < end) {
552 mask = ~(0xff << (end & 7));
553 *tab |= mask;
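/* Worked example (illustrative, not part of the original source):
   set_bits(bitmap, 5, 10) marks bits 5..14.  start = 5 and end = 15 land
   in different bytes, so the first byte is OR-ed with 0xff << 5 == 0xe0
   (bits 5..7), no full 0xff byte is written (end & ~7 == 8), and the tail
   byte is OR-ed with ~(0xff << 7) == 0x7f (bits 8..14). */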
558 static void build_page_bitmap(PageDesc *p)
560 int n, tb_start, tb_end;
561 TranslationBlock *tb;
563 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
564 if (!p->code_bitmap)
565 return;
566 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
568 tb = p->first_tb;
569 while (tb != NULL) {
570 n = (long)tb & 3;
571 tb = (TranslationBlock *)((long)tb & ~3);
572 /* NOTE: this is subtle as a TB may span two physical pages */
573 if (n == 0) {
574 /* NOTE: tb_end may be after the end of the page, but
575 it is not a problem */
576 tb_start = tb->pc & ~TARGET_PAGE_MASK;
577 tb_end = tb_start + tb->size;
578 if (tb_end > TARGET_PAGE_SIZE)
579 tb_end = TARGET_PAGE_SIZE;
580 } else {
581 tb_start = 0;
582 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
584 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
585 tb = tb->page_next[n];
589 #ifdef TARGET_HAS_PRECISE_SMC
591 static void tb_gen_code(CPUState *env,
592 target_ulong pc, target_ulong cs_base, int flags,
593 int cflags)
595 TranslationBlock *tb;
596 uint8_t *tc_ptr;
597 target_ulong phys_pc, phys_page2, virt_page2;
598 int code_gen_size;
600 phys_pc = get_phys_addr_code(env, pc);
601 tb = tb_alloc(pc);
602 if (!tb) {
603 /* flush must be done */
604 tb_flush(env);
605 /* cannot fail at this point */
606 tb = tb_alloc(pc);
608 tc_ptr = code_gen_ptr;
609 tb->tc_ptr = tc_ptr;
610 tb->cs_base = cs_base;
611 tb->flags = flags;
612 tb->cflags = cflags;
613 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
614 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
616 /* check next page if needed */
617 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
618 phys_page2 = -1;
619 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
620 phys_page2 = get_phys_addr_code(env, virt_page2);
622 tb_link_phys(tb, phys_pc, phys_page2);
624 #endif
626 /* invalidate all TBs which intersect with the target physical page
 627 starting in range [start, end[. NOTE: start and end must refer to
628 the same physical page. 'is_cpu_write_access' should be true if called
629 from a real cpu write access: the virtual CPU will exit the current
630 TB if code is modified inside this TB. */
631 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
632 int is_cpu_write_access)
634 int n, current_tb_modified, current_tb_not_found, current_flags;
635 CPUState *env = cpu_single_env;
636 PageDesc *p;
637 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
638 target_ulong tb_start, tb_end;
639 target_ulong current_pc, current_cs_base;
641 p = page_find(start >> TARGET_PAGE_BITS);
642 if (!p)
643 return;
644 if (!p->code_bitmap &&
645 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
646 is_cpu_write_access) {
647 /* build code bitmap */
648 build_page_bitmap(p);
651 /* we remove all the TBs in the range [start, end[ */
652 /* XXX: see if in some cases it could be faster to invalidate all the code */
653 current_tb_not_found = is_cpu_write_access;
654 current_tb_modified = 0;
655 current_tb = NULL; /* avoid warning */
656 current_pc = 0; /* avoid warning */
657 current_cs_base = 0; /* avoid warning */
658 current_flags = 0; /* avoid warning */
659 tb = p->first_tb;
660 while (tb != NULL) {
661 n = (long)tb & 3;
662 tb = (TranslationBlock *)((long)tb & ~3);
663 tb_next = tb->page_next[n];
664 /* NOTE: this is subtle as a TB may span two physical pages */
665 if (n == 0) {
666 /* NOTE: tb_end may be after the end of the page, but
667 it is not a problem */
668 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
669 tb_end = tb_start + tb->size;
670 } else {
671 tb_start = tb->page_addr[1];
672 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
674 if (!(tb_end <= start || tb_start >= end)) {
675 #ifdef TARGET_HAS_PRECISE_SMC
676 if (current_tb_not_found) {
677 current_tb_not_found = 0;
678 current_tb = NULL;
679 if (env->mem_write_pc) {
680 /* now we have a real cpu fault */
681 current_tb = tb_find_pc(env->mem_write_pc);
684 if (current_tb == tb &&
685 !(current_tb->cflags & CF_SINGLE_INSN)) {
686 /* If we are modifying the current TB, we must stop
687 its execution. We could be more precise by checking
688 that the modification is after the current PC, but it
689 would require a specialized function to partially
690 restore the CPU state */
692 current_tb_modified = 1;
693 cpu_restore_state(current_tb, env,
694 env->mem_write_pc, NULL);
695 #if defined(TARGET_I386)
696 current_flags = env->hflags;
697 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
698 current_cs_base = (target_ulong)env->segs[R_CS].base;
699 current_pc = current_cs_base + env->eip;
700 #else
701 #error unsupported CPU
702 #endif
704 #endif /* TARGET_HAS_PRECISE_SMC */
705 /* we need to do that to handle the case where a signal
706 occurs while doing tb_phys_invalidate() */
707 saved_tb = NULL;
708 if (env) {
709 saved_tb = env->current_tb;
710 env->current_tb = NULL;
712 tb_phys_invalidate(tb, -1);
713 if (env) {
714 env->current_tb = saved_tb;
715 if (env->interrupt_request && env->current_tb)
716 cpu_interrupt(env, env->interrupt_request);
719 tb = tb_next;
721 #if !defined(CONFIG_USER_ONLY)
722 /* if no code remaining, no need to continue to use slow writes */
723 if (!p->first_tb) {
724 invalidate_page_bitmap(p);
725 if (is_cpu_write_access) {
726 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
729 #endif
730 #ifdef TARGET_HAS_PRECISE_SMC
731 if (current_tb_modified) {
732 /* we generate a block containing just the instruction
733 modifying the memory. It will ensure that it cannot modify
734 itself */
735 env->current_tb = NULL;
736 tb_gen_code(env, current_pc, current_cs_base, current_flags,
737 CF_SINGLE_INSN);
738 cpu_resume_from_signal(env, NULL);
740 #endif
743 /* len must be <= 8 and start must be a multiple of len */
744 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
746 PageDesc *p;
747 int offset, b;
748 #if 0
749 if (1) {
750 if (loglevel) {
751 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
752 cpu_single_env->mem_write_vaddr, len,
753 cpu_single_env->eip,
754 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
757 #endif
758 p = page_find(start >> TARGET_PAGE_BITS);
759 if (!p)
760 return;
761 if (p->code_bitmap) {
762 offset = start & ~TARGET_PAGE_MASK;
763 b = p->code_bitmap[offset >> 3] >> (offset & 7);
764 if (b & ((1 << len) - 1))
765 goto do_invalidate;
766 } else {
767 do_invalidate:
768 tb_invalidate_phys_page_range(start, start + len, 1);
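/* Illustrative walk-through (not part of the original source): for a
   4-byte write at page offset 0x124, offset >> 3 selects bitmap byte 0x24
   and (offset & 7) == 4 shifts it so that the low bits of b correspond to
   page offsets 0x124..0x127; the test against (1 << len) - 1 == 0xf then
   asks whether any of the four written bytes overlap translated code.
   The "start must be a multiple of len" precondition guarantees those
   bits never straddle a bitmap byte. */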
772 #if !defined(CONFIG_SOFTMMU)
773 static void tb_invalidate_phys_page(target_ulong addr,
774 unsigned long pc, void *puc)
776 int n, current_flags, current_tb_modified;
777 target_ulong current_pc, current_cs_base;
778 PageDesc *p;
779 TranslationBlock *tb, *current_tb;
780 #ifdef TARGET_HAS_PRECISE_SMC
781 CPUState *env = cpu_single_env;
782 #endif
784 addr &= TARGET_PAGE_MASK;
785 p = page_find(addr >> TARGET_PAGE_BITS);
786 if (!p)
787 return;
788 tb = p->first_tb;
789 current_tb_modified = 0;
790 current_tb = NULL;
791 current_pc = 0; /* avoid warning */
792 current_cs_base = 0; /* avoid warning */
793 current_flags = 0; /* avoid warning */
794 #ifdef TARGET_HAS_PRECISE_SMC
795 if (tb && pc != 0) {
796 current_tb = tb_find_pc(pc);
798 #endif
799 while (tb != NULL) {
800 n = (long)tb & 3;
801 tb = (TranslationBlock *)((long)tb & ~3);
802 #ifdef TARGET_HAS_PRECISE_SMC
803 if (current_tb == tb &&
804 !(current_tb->cflags & CF_SINGLE_INSN)) {
805 /* If we are modifying the current TB, we must stop
806 its execution. We could be more precise by checking
807 that the modification is after the current PC, but it
808 would require a specialized function to partially
809 restore the CPU state */
811 current_tb_modified = 1;
812 cpu_restore_state(current_tb, env, pc, puc);
813 #if defined(TARGET_I386)
814 current_flags = env->hflags;
815 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
816 current_cs_base = (target_ulong)env->segs[R_CS].base;
817 current_pc = current_cs_base + env->eip;
818 #else
819 #error unsupported CPU
820 #endif
822 #endif /* TARGET_HAS_PRECISE_SMC */
823 tb_phys_invalidate(tb, addr);
824 tb = tb->page_next[n];
826 p->first_tb = NULL;
827 #ifdef TARGET_HAS_PRECISE_SMC
828 if (current_tb_modified) {
829 /* we generate a block containing just the instruction
830 modifying the memory. It will ensure that it cannot modify
831 itself */
832 env->current_tb = NULL;
833 tb_gen_code(env, current_pc, current_cs_base, current_flags,
834 CF_SINGLE_INSN);
835 cpu_resume_from_signal(env, puc);
837 #endif
839 #endif
841 /* add the tb in the target page and protect it if necessary */
842 static inline void tb_alloc_page(TranslationBlock *tb,
843 unsigned int n, target_ulong page_addr)
845 PageDesc *p;
846 TranslationBlock *last_first_tb;
848 tb->page_addr[n] = page_addr;
849 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
850 tb->page_next[n] = p->first_tb;
851 last_first_tb = p->first_tb;
852 p->first_tb = (TranslationBlock *)((long)tb | n);
853 invalidate_page_bitmap(p);
855 #if defined(TARGET_HAS_SMC) || 1
857 #if defined(CONFIG_USER_ONLY)
858 if (p->flags & PAGE_WRITE) {
859 target_ulong addr;
860 PageDesc *p2;
861 int prot;
863 /* force the host page as non writable (writes will have a
864 page fault + mprotect overhead) */
865 page_addr &= qemu_host_page_mask;
866 prot = 0;
867 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
868 addr += TARGET_PAGE_SIZE) {
870 p2 = page_find (addr >> TARGET_PAGE_BITS);
871 if (!p2)
872 continue;
873 prot |= p2->flags;
874 p2->flags &= ~PAGE_WRITE;
875 page_get_flags(addr);
877 mprotect(g2h(page_addr), qemu_host_page_size,
878 (prot & PAGE_BITS) & ~PAGE_WRITE);
879 #ifdef DEBUG_TB_INVALIDATE
880 printf("protecting code page: 0x%08lx\n",
881 page_addr);
882 #endif
884 #else
885 /* if some code is already present, then the pages are already
886 protected. So we handle the case where only the first TB is
887 allocated in a physical page */
888 if (!last_first_tb) {
889 tlb_protect_code(page_addr);
891 #endif
893 #endif /* TARGET_HAS_SMC */
896 /* Allocate a new translation block. Flush the translation buffer if
897 too many translation blocks or too much generated code. */
898 TranslationBlock *tb_alloc(target_ulong pc)
900 TranslationBlock *tb;
902 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
903 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
904 return NULL;
905 tb = &tbs[nb_tbs++];
906 tb->pc = pc;
907 tb->cflags = 0;
908 return tb;
911 /* add a new TB and link it to the physical page tables. phys_page2 is
912 (-1) to indicate that only one page contains the TB. */
913 void tb_link_phys(TranslationBlock *tb,
914 target_ulong phys_pc, target_ulong phys_page2)
916 unsigned int h;
917 TranslationBlock **ptb;
919 /* add in the physical hash table */
920 h = tb_phys_hash_func(phys_pc);
921 ptb = &tb_phys_hash[h];
922 tb->phys_hash_next = *ptb;
923 *ptb = tb;
925 /* add in the page list */
926 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
927 if (phys_page2 != -1)
928 tb_alloc_page(tb, 1, phys_page2);
929 else
930 tb->page_addr[1] = -1;
932 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
933 tb->jmp_next[0] = NULL;
934 tb->jmp_next[1] = NULL;
935 #ifdef USE_CODE_COPY
936 tb->cflags &= ~CF_FP_USED;
937 if (tb->cflags & CF_TB_FP_USED)
938 tb->cflags |= CF_FP_USED;
939 #endif
941 /* init original jump addresses */
942 if (tb->tb_next_offset[0] != 0xffff)
943 tb_reset_jump(tb, 0);
944 if (tb->tb_next_offset[1] != 0xffff)
945 tb_reset_jump(tb, 1);
947 #ifdef DEBUG_TB_CHECK
948 tb_page_check();
949 #endif
952 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
953 tb[1].tc_ptr. Return NULL if not found */
954 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
956 int m_min, m_max, m;
957 unsigned long v;
958 TranslationBlock *tb;
960 if (nb_tbs <= 0)
961 return NULL;
962 if (tc_ptr < (unsigned long)code_gen_buffer ||
963 tc_ptr >= (unsigned long)code_gen_ptr)
964 return NULL;
965 /* binary search (cf Knuth) */
966 m_min = 0;
967 m_max = nb_tbs - 1;
968 while (m_min <= m_max) {
969 m = (m_min + m_max) >> 1;
970 tb = &tbs[m];
971 v = (unsigned long)tb->tc_ptr;
972 if (v == tc_ptr)
973 return tb;
974 else if (tc_ptr < v) {
975 m_max = m - 1;
976 } else {
977 m_min = m + 1;
980 return &tbs[m_max];
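/* Note (illustrative, not part of the original source): when the loop
   exits without an exact match, tbs[m_max].tc_ptr is the largest value
   below tc_ptr; since TBs are allocated and their code emitted in the
   same increasing code_gen_buffer order, that entry is the block whose
   generated code contains tc_ptr, hence the final return. */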
983 static void tb_reset_jump_recursive(TranslationBlock *tb);
985 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
987 TranslationBlock *tb1, *tb_next, **ptb;
988 unsigned int n1;
990 tb1 = tb->jmp_next[n];
991 if (tb1 != NULL) {
992 /* find head of list */
993 for(;;) {
994 n1 = (long)tb1 & 3;
995 tb1 = (TranslationBlock *)((long)tb1 & ~3);
996 if (n1 == 2)
997 break;
998 tb1 = tb1->jmp_next[n1];
1000 /* we are now sure now that tb jumps to tb1 */
1001 tb_next = tb1;
1003 /* remove tb from the jmp_first list */
1004 ptb = &tb_next->jmp_first;
1005 for(;;) {
1006 tb1 = *ptb;
1007 n1 = (long)tb1 & 3;
1008 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1009 if (n1 == n && tb1 == tb)
1010 break;
1011 ptb = &tb1->jmp_next[n1];
1013 *ptb = tb->jmp_next[n];
1014 tb->jmp_next[n] = NULL;
1016 /* suppress the jump to next tb in generated code */
1017 tb_reset_jump(tb, n);
1019 /* suppress jumps in the tb on which we could have jumped */
1020 tb_reset_jump_recursive(tb_next);
1024 static void tb_reset_jump_recursive(TranslationBlock *tb)
1026 tb_reset_jump_recursive2(tb, 0);
1027 tb_reset_jump_recursive2(tb, 1);
1030 #if defined(TARGET_HAS_ICE)
1031 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1033 target_phys_addr_t addr;
1034 target_ulong pd;
1035 ram_addr_t ram_addr;
1036 PhysPageDesc *p;
1038 addr = cpu_get_phys_page_debug(env, pc);
1039 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1040 if (!p) {
1041 pd = IO_MEM_UNASSIGNED;
1042 } else {
1043 pd = p->phys_offset;
1045 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1046 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1048 #endif
1050 /* Add a watchpoint. */
1051 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1053 int i;
1055 for (i = 0; i < env->nb_watchpoints; i++) {
1056 if (addr == env->watchpoint[i].vaddr)
1057 return 0;
1059 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1060 return -1;
1062 i = env->nb_watchpoints++;
1063 env->watchpoint[i].vaddr = addr;
1064 tlb_flush_page(env, addr);
1065 /* FIXME: This flush is needed because of the hack to make memory ops
1066 terminate the TB. It can be removed once the proper IO trap and
1067 re-execute bits are in. */
1068 tb_flush(env);
1069 return i;
1072 /* Remove a watchpoint. */
1073 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1075 int i;
1077 for (i = 0; i < env->nb_watchpoints; i++) {
1078 if (addr == env->watchpoint[i].vaddr) {
1079 env->nb_watchpoints--;
1080 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1081 tlb_flush_page(env, addr);
1082 return 0;
1085 return -1;
1088 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1089 breakpoint is reached */
1090 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1092 #if defined(TARGET_HAS_ICE)
1093 int i;
1095 for(i = 0; i < env->nb_breakpoints; i++) {
1096 if (env->breakpoints[i] == pc)
1097 return 0;
1100 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1101 return -1;
1102 env->breakpoints[env->nb_breakpoints++] = pc;
1104 breakpoint_invalidate(env, pc);
1105 return 0;
1106 #else
1107 return -1;
1108 #endif
1111 /* remove a breakpoint */
1112 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1114 #if defined(TARGET_HAS_ICE)
1115 int i;
1116 for(i = 0; i < env->nb_breakpoints; i++) {
1117 if (env->breakpoints[i] == pc)
1118 goto found;
1120 return -1;
1121 found:
1122 env->nb_breakpoints--;
1123 if (i < env->nb_breakpoints)
1124 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1126 breakpoint_invalidate(env, pc);
1127 return 0;
1128 #else
1129 return -1;
1130 #endif
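/* Usage sketch (illustrative, not part of the original source): a debug
   front end such as the gdb stub toggles breakpoints with these helpers
   and then lets the CPU loop report EXCP_DEBUG; the wrapper name below is
   hypothetical. */
#if 0
static int example_toggle_breakpoint(CPUState *env, target_ulong pc, int on)
{
    /* both helpers return -1 on failure (table full / pc not found) */
    return on ? cpu_breakpoint_insert(env, pc)
              : cpu_breakpoint_remove(env, pc);
}
#endif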
1133 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1134 CPU loop after each instruction */
1135 void cpu_single_step(CPUState *env, int enabled)
1137 #if defined(TARGET_HAS_ICE)
1138 if (env->singlestep_enabled != enabled) {
1139 env->singlestep_enabled = enabled;
1140 /* must flush all the translated code to avoid inconsistencies */
1141 /* XXX: only flush what is necessary */
1142 tb_flush(env);
1144 #endif
1147 /* enable or disable low levels log */
1148 void cpu_set_log(int log_flags)
1150 loglevel = log_flags;
1151 if (loglevel && !logfile) {
1152 logfile = fopen(logfilename, "w");
1153 if (!logfile) {
1154 perror(logfilename);
1155 _exit(1);
1157 #if !defined(CONFIG_SOFTMMU)
1158 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1160 static uint8_t logfile_buf[4096];
1161 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1163 #else
1164 setvbuf(logfile, NULL, _IOLBF, 0);
1165 #endif
1169 void cpu_set_log_filename(const char *filename)
1171 logfilename = strdup(filename);
1174 /* mask must never be zero, except for A20 change call */
1175 void cpu_interrupt(CPUState *env, int mask)
1177 TranslationBlock *tb;
1178 static int interrupt_lock;
1180 env->interrupt_request |= mask;
1181 /* if the cpu is currently executing code, we must unlink it and
1182 all the potentially executing TB */
1183 tb = env->current_tb;
1184 if (tb && !testandset(&interrupt_lock)) {
1185 env->current_tb = NULL;
1186 tb_reset_jump_recursive(tb);
1187 interrupt_lock = 0;
1191 void cpu_reset_interrupt(CPUState *env, int mask)
1193 env->interrupt_request &= ~mask;
1196 CPULogItem cpu_log_items[] = {
1197 { CPU_LOG_TB_OUT_ASM, "out_asm",
1198 "show generated host assembly code for each compiled TB" },
1199 { CPU_LOG_TB_IN_ASM, "in_asm",
1200 "show target assembly code for each compiled TB" },
1201 { CPU_LOG_TB_OP, "op",
1202 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1203 #ifdef TARGET_I386
1204 { CPU_LOG_TB_OP_OPT, "op_opt",
1205 "show micro ops after optimization for each compiled TB" },
1206 #endif
1207 { CPU_LOG_INT, "int",
1208 "show interrupts/exceptions in short format" },
1209 { CPU_LOG_EXEC, "exec",
1210 "show trace before each executed TB (lots of logs)" },
1211 { CPU_LOG_TB_CPU, "cpu",
1212 "show CPU state before bloc translation" },
1213 #ifdef TARGET_I386
1214 { CPU_LOG_PCALL, "pcall",
1215 "show protected mode far calls/returns/exceptions" },
1216 #endif
1217 #ifdef DEBUG_IOPORT
1218 { CPU_LOG_IOPORT, "ioport",
1219 "show all i/o ports accesses" },
1220 #endif
1221 { 0, NULL, NULL },
1224 static int cmp1(const char *s1, int n, const char *s2)
1226 if (strlen(s2) != n)
1227 return 0;
1228 return memcmp(s1, s2, n) == 0;
1231 /* takes a comma-separated list of log masks. Returns 0 on error. */
1232 int cpu_str_to_log_mask(const char *str)
1234 CPULogItem *item;
1235 int mask;
1236 const char *p, *p1;
1238 p = str;
1239 mask = 0;
1240 for(;;) {
1241 p1 = strchr(p, ',');
1242 if (!p1)
1243 p1 = p + strlen(p);
1244 if(cmp1(p,p1-p,"all")) {
1245 for(item = cpu_log_items; item->mask != 0; item++) {
1246 mask |= item->mask;
1248 } else {
1249 for(item = cpu_log_items; item->mask != 0; item++) {
1250 if (cmp1(p, p1 - p, item->name))
1251 goto found;
1253 return 0;
1255 found:
1256 mask |= item->mask;
1257 if (*p1 != ',')
1258 break;
1259 p = p1 + 1;
1261 return mask;
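/* Example (illustrative, not part of the original source): with the
   cpu_log_items[] table above, cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, while any unknown name in the list,
   e.g. "in_asm,bogus", makes the whole call return 0. */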
1264 void cpu_abort(CPUState *env, const char *fmt, ...)
1266 va_list ap;
1268 va_start(ap, fmt);
1269 fprintf(stderr, "qemu: fatal: ");
1270 vfprintf(stderr, fmt, ap);
1271 fprintf(stderr, "\n");
1272 #ifdef TARGET_I386
1273 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1274 #else
1275 cpu_dump_state(env, stderr, fprintf, 0);
1276 #endif
1277 va_end(ap);
1278 abort();
1281 CPUState *cpu_copy(CPUState *env)
1283 CPUState *new_env = cpu_init();
1284 /* preserve chaining and index */
1285 CPUState *next_cpu = new_env->next_cpu;
1286 int cpu_index = new_env->cpu_index;
1287 memcpy(new_env, env, sizeof(CPUState));
1288 new_env->next_cpu = next_cpu;
1289 new_env->cpu_index = cpu_index;
1290 return new_env;
1293 #if !defined(CONFIG_USER_ONLY)
1295 /* NOTE: if flush_global is true, also flush global entries (not
1296 implemented yet) */
1297 void tlb_flush(CPUState *env, int flush_global)
1299 int i;
1301 #if defined(DEBUG_TLB)
1302 printf("tlb_flush:\n");
1303 #endif
1304 /* must reset current TB so that interrupts cannot modify the
1305 links while we are modifying them */
1306 env->current_tb = NULL;
1308 for(i = 0; i < CPU_TLB_SIZE; i++) {
1309 env->tlb_table[0][i].addr_read = -1;
1310 env->tlb_table[0][i].addr_write = -1;
1311 env->tlb_table[0][i].addr_code = -1;
1312 env->tlb_table[1][i].addr_read = -1;
1313 env->tlb_table[1][i].addr_write = -1;
1314 env->tlb_table[1][i].addr_code = -1;
1315 #if (NB_MMU_MODES >= 3)
1316 env->tlb_table[2][i].addr_read = -1;
1317 env->tlb_table[2][i].addr_write = -1;
1318 env->tlb_table[2][i].addr_code = -1;
1319 #if (NB_MMU_MODES == 4)
1320 env->tlb_table[3][i].addr_read = -1;
1321 env->tlb_table[3][i].addr_write = -1;
1322 env->tlb_table[3][i].addr_code = -1;
1323 #endif
1324 #endif
1327 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1329 #if !defined(CONFIG_SOFTMMU)
1330 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1331 #endif
1332 #ifdef USE_KQEMU
1333 if (env->kqemu_enabled) {
1334 kqemu_flush(env, flush_global);
1336 #endif
1337 tlb_flush_count++;
1340 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1342 if (addr == (tlb_entry->addr_read &
1343 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1344 addr == (tlb_entry->addr_write &
1345 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1346 addr == (tlb_entry->addr_code &
1347 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1348 tlb_entry->addr_read = -1;
1349 tlb_entry->addr_write = -1;
1350 tlb_entry->addr_code = -1;
1354 void tlb_flush_page(CPUState *env, target_ulong addr)
1356 int i;
1357 TranslationBlock *tb;
1359 #if defined(DEBUG_TLB)
1360 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1361 #endif
1362 /* must reset current TB so that interrupts cannot modify the
1363 links while we are modifying them */
1364 env->current_tb = NULL;
1366 addr &= TARGET_PAGE_MASK;
1367 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1368 tlb_flush_entry(&env->tlb_table[0][i], addr);
1369 tlb_flush_entry(&env->tlb_table[1][i], addr);
1370 #if (NB_MMU_MODES >= 3)
1371 tlb_flush_entry(&env->tlb_table[2][i], addr);
1372 #if (NB_MMU_MODES == 4)
1373 tlb_flush_entry(&env->tlb_table[3][i], addr);
1374 #endif
1375 #endif
1377 /* Discard jump cache entries for any tb which might potentially
1378 overlap the flushed page. */
1379 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1380 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1382 i = tb_jmp_cache_hash_page(addr);
1383 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1385 #if !defined(CONFIG_SOFTMMU)
1386 if (addr < MMAP_AREA_END)
1387 munmap((void *)addr, TARGET_PAGE_SIZE);
1388 #endif
1389 #ifdef USE_KQEMU
1390 if (env->kqemu_enabled) {
1391 kqemu_flush_page(env, addr);
1393 #endif
1396 /* update the TLBs so that writes to code in the virtual page 'addr'
1397 can be detected */
1398 static void tlb_protect_code(ram_addr_t ram_addr)
1400 cpu_physical_memory_reset_dirty(ram_addr,
1401 ram_addr + TARGET_PAGE_SIZE,
1402 CODE_DIRTY_FLAG);
1405 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1406 tested for self modifying code */
1407 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1408 target_ulong vaddr)
1410 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1413 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1414 unsigned long start, unsigned long length)
1416 unsigned long addr;
1417 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1418 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1419 if ((addr - start) < length) {
1420 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1425 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1426 int dirty_flags)
1428 CPUState *env;
1429 unsigned long length, start1;
1430 int i, mask, len;
1431 uint8_t *p;
1433 start &= TARGET_PAGE_MASK;
1434 end = TARGET_PAGE_ALIGN(end);
1436 length = end - start;
1437 if (length == 0)
1438 return;
1439 len = length >> TARGET_PAGE_BITS;
1440 #ifdef USE_KQEMU
1441 /* XXX: should not depend on cpu context */
1442 env = first_cpu;
1443 if (env->kqemu_enabled) {
1444 ram_addr_t addr;
1445 addr = start;
1446 for(i = 0; i < len; i++) {
1447 kqemu_set_notdirty(env, addr);
1448 addr += TARGET_PAGE_SIZE;
1451 #endif
1452 mask = ~dirty_flags;
1453 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1454 for(i = 0; i < len; i++)
1455 p[i] &= mask;
1457 /* we modify the TLB cache so that the dirty bit will be set again
1458 when accessing the range */
1459 start1 = start + (unsigned long)phys_ram_base;
1460 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1461 for(i = 0; i < CPU_TLB_SIZE; i++)
1462 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1463 for(i = 0; i < CPU_TLB_SIZE; i++)
1464 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1465 #if (NB_MMU_MODES >= 3)
1466 for(i = 0; i < CPU_TLB_SIZE; i++)
1467 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1468 #if (NB_MMU_MODES == 4)
1469 for(i = 0; i < CPU_TLB_SIZE; i++)
1470 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1471 #endif
1472 #endif
1475 #if !defined(CONFIG_SOFTMMU)
1476 /* XXX: this is expensive */
1478 VirtPageDesc *p;
1479 int j;
1480 target_ulong addr;
1482 for(i = 0; i < L1_SIZE; i++) {
1483 p = l1_virt_map[i];
1484 if (p) {
1485 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1486 for(j = 0; j < L2_SIZE; j++) {
1487 if (p->valid_tag == virt_valid_tag &&
1488 p->phys_addr >= start && p->phys_addr < end &&
1489 (p->prot & PROT_WRITE)) {
1490 if (addr < MMAP_AREA_END) {
1491 mprotect((void *)addr, TARGET_PAGE_SIZE,
1492 p->prot & ~PROT_WRITE);
1495 addr += TARGET_PAGE_SIZE;
1496 p++;
1501 #endif
1504 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1506 ram_addr_t ram_addr;
1508 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1509 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1510 tlb_entry->addend - (unsigned long)phys_ram_base;
1511 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1512 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1517 /* update the TLB according to the current state of the dirty bits */
1518 void cpu_tlb_update_dirty(CPUState *env)
1520 int i;
1521 for(i = 0; i < CPU_TLB_SIZE; i++)
1522 tlb_update_dirty(&env->tlb_table[0][i]);
1523 for(i = 0; i < CPU_TLB_SIZE; i++)
1524 tlb_update_dirty(&env->tlb_table[1][i]);
1525 #if (NB_MMU_MODES >= 3)
1526 for(i = 0; i < CPU_TLB_SIZE; i++)
1527 tlb_update_dirty(&env->tlb_table[2][i]);
1528 #if (NB_MMU_MODES == 4)
1529 for(i = 0; i < CPU_TLB_SIZE; i++)
1530 tlb_update_dirty(&env->tlb_table[3][i]);
1531 #endif
1532 #endif
1535 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1536 unsigned long start)
1538 unsigned long addr;
1539 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1540 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1541 if (addr == start) {
1542 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1547 /* update the TLB corresponding to virtual page vaddr and phys addr
1548 addr so that it is no longer dirty */
1549 static inline void tlb_set_dirty(CPUState *env,
1550 unsigned long addr, target_ulong vaddr)
1552 int i;
1554 addr &= TARGET_PAGE_MASK;
1555 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1556 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1557 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1558 #if (NB_MMU_MODES >= 3)
1559 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1560 #if (NB_MMU_MODES == 4)
1561 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1562 #endif
1563 #endif
1566 /* add a new TLB entry. At most one entry for a given virtual address
1567 is permitted. Return 0 if OK or 2 if the page could not be mapped
1568 (can only happen in non SOFTMMU mode for I/O pages or pages
1569 conflicting with the host address space). */
1570 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1571 target_phys_addr_t paddr, int prot,
1572 int is_user, int is_softmmu)
1574 PhysPageDesc *p;
1575 unsigned long pd;
1576 unsigned int index;
1577 target_ulong address;
1578 target_phys_addr_t addend;
1579 int ret;
1580 CPUTLBEntry *te;
1581 int i;
1583 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1584 if (!p) {
1585 pd = IO_MEM_UNASSIGNED;
1586 } else {
1587 pd = p->phys_offset;
1589 #if defined(DEBUG_TLB)
1590 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1591 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1592 #endif
1594 ret = 0;
1595 #if !defined(CONFIG_SOFTMMU)
1596 if (is_softmmu)
1597 #endif
1599 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1600 /* IO memory case */
1601 address = vaddr | pd;
1602 addend = paddr;
1603 } else {
1604 /* standard memory */
1605 address = vaddr;
1606 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1609 /* Make accesses to pages with watchpoints go via the
1610 watchpoint trap routines. */
1611 for (i = 0; i < env->nb_watchpoints; i++) {
1612 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1613 if (address & ~TARGET_PAGE_MASK) {
1614 env->watchpoint[i].is_ram = 0;
1615 address = vaddr | io_mem_watch;
1616 } else {
1617 env->watchpoint[i].is_ram = 1;
1618 /* TODO: Figure out how to make read watchpoints coexist
1619 with code. */
1620 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1625 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1626 addend -= vaddr;
1627 te = &env->tlb_table[is_user][index];
1628 te->addend = addend;
1629 if (prot & PAGE_READ) {
1630 te->addr_read = address;
1631 } else {
1632 te->addr_read = -1;
1634 if (prot & PAGE_EXEC) {
1635 te->addr_code = address;
1636 } else {
1637 te->addr_code = -1;
1639 if (prot & PAGE_WRITE) {
1640 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1641 (pd & IO_MEM_ROMD)) {
1642 /* write access calls the I/O callback */
1643 te->addr_write = vaddr |
1644 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1645 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1646 !cpu_physical_memory_is_dirty(pd)) {
1647 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1648 } else {
1649 te->addr_write = address;
1651 } else {
1652 te->addr_write = -1;
1655 #if !defined(CONFIG_SOFTMMU)
1656 else {
1657 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1658 /* IO access: no mapping is done as it will be handled by the
1659 soft MMU */
1660 if (!(env->hflags & HF_SOFTMMU_MASK))
1661 ret = 2;
1662 } else {
1663 void *map_addr;
1665 if (vaddr >= MMAP_AREA_END) {
1666 ret = 2;
1667 } else {
1668 if (prot & PROT_WRITE) {
1669 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1670 #if defined(TARGET_HAS_SMC) || 1
1671 first_tb ||
1672 #endif
1673 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1674 !cpu_physical_memory_is_dirty(pd))) {
1675 /* ROM: we behave as if code were inside */
1676 /* if code is present, we only map as read only and save the
1677 original mapping */
1678 VirtPageDesc *vp;
1680 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1681 vp->phys_addr = pd;
1682 vp->prot = prot;
1683 vp->valid_tag = virt_valid_tag;
1684 prot &= ~PAGE_WRITE;
1687 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1688 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1689 if (map_addr == MAP_FAILED) {
1690 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1691 paddr, vaddr);
1696 #endif
1697 return ret;
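/* Usage sketch (illustrative, not part of the original source): a target
   MMU fault handler typically finishes by installing the translation it
   resolved, e.g.

       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               prot, is_user, is_softmmu);

   with prot a combination of PAGE_READ / PAGE_WRITE / PAGE_EXEC; the
   return value 2 only matters in the non-SOFTMMU configuration, as
   described above. */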
1700 /* called from signal handler: invalidate the code and unprotect the
1701 page. Return TRUE if the fault was successfully handled. */
1702 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1704 #if !defined(CONFIG_SOFTMMU)
1705 VirtPageDesc *vp;
1707 #if defined(DEBUG_TLB)
1708 printf("page_unprotect: addr=0x%08x\n", addr);
1709 #endif
1710 addr &= TARGET_PAGE_MASK;
1712 /* if it is not mapped, no need to worry here */
1713 if (addr >= MMAP_AREA_END)
1714 return 0;
1715 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1716 if (!vp)
1717 return 0;
1718 /* NOTE: in this case, validate_tag is _not_ tested as it
1719 validates only the code TLB */
1720 if (vp->valid_tag != virt_valid_tag)
1721 return 0;
1722 if (!(vp->prot & PAGE_WRITE))
1723 return 0;
1724 #if defined(DEBUG_TLB)
1725 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1726 addr, vp->phys_addr, vp->prot);
1727 #endif
1728 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1729 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1730 (unsigned long)addr, vp->prot);
1731 /* set the dirty bit */
1732 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1733 /* flush the code inside */
1734 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1735 return 1;
1736 #else
1737 return 0;
1738 #endif
1741 #else
1743 void tlb_flush(CPUState *env, int flush_global)
1747 void tlb_flush_page(CPUState *env, target_ulong addr)
1751 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1752 target_phys_addr_t paddr, int prot,
1753 int is_user, int is_softmmu)
1755 return 0;
1758 /* dump memory mappings */
1759 void page_dump(FILE *f)
1761 unsigned long start, end;
1762 int i, j, prot, prot1;
1763 PageDesc *p;
1765 fprintf(f, "%-8s %-8s %-8s %s\n",
1766 "start", "end", "size", "prot");
1767 start = -1;
1768 end = -1;
1769 prot = 0;
1770 for(i = 0; i <= L1_SIZE; i++) {
1771 if (i < L1_SIZE)
1772 p = l1_map[i];
1773 else
1774 p = NULL;
1775 for(j = 0;j < L2_SIZE; j++) {
1776 if (!p)
1777 prot1 = 0;
1778 else
1779 prot1 = p[j].flags;
1780 if (prot1 != prot) {
1781 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1782 if (start != -1) {
1783 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1784 start, end, end - start,
1785 prot & PAGE_READ ? 'r' : '-',
1786 prot & PAGE_WRITE ? 'w' : '-',
1787 prot & PAGE_EXEC ? 'x' : '-');
1789 if (prot1 != 0)
1790 start = end;
1791 else
1792 start = -1;
1793 prot = prot1;
1795 if (!p)
1796 break;
1801 int page_get_flags(target_ulong address)
1803 PageDesc *p;
1805 p = page_find(address >> TARGET_PAGE_BITS);
1806 if (!p)
1807 return 0;
1808 return p->flags;
1811 /* modify the flags of a page and invalidate the code if
1812 necessary. The flag PAGE_WRITE_ORG is set automatically
1813 depending on PAGE_WRITE */
1814 void page_set_flags(target_ulong start, target_ulong end, int flags)
1816 PageDesc *p;
1817 target_ulong addr;
1819 start = start & TARGET_PAGE_MASK;
1820 end = TARGET_PAGE_ALIGN(end);
1821 if (flags & PAGE_WRITE)
1822 flags |= PAGE_WRITE_ORG;
1823 spin_lock(&tb_lock);
1824 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1825 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1826 /* if the write protection is set, then we invalidate the code
1827 inside */
1828 if (!(p->flags & PAGE_WRITE) &&
1829 (flags & PAGE_WRITE) &&
1830 p->first_tb) {
1831 tb_invalidate_phys_page(addr, 0, NULL);
1833 p->flags = flags;
1835 spin_unlock(&tb_lock);
1838 /* called from signal handler: invalidate the code and unprotect the
1839 page. Return TRUE if the fault was successfully handled. */
1840 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1842 unsigned int page_index, prot, pindex;
1843 PageDesc *p, *p1;
1844 target_ulong host_start, host_end, addr;
1846 host_start = address & qemu_host_page_mask;
1847 page_index = host_start >> TARGET_PAGE_BITS;
1848 p1 = page_find(page_index);
1849 if (!p1)
1850 return 0;
1851 host_end = host_start + qemu_host_page_size;
1852 p = p1;
1853 prot = 0;
1854 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1855 prot |= p->flags;
1856 p++;
1858 /* if the page was really writable, then we change its
1859 protection back to writable */
1860 if (prot & PAGE_WRITE_ORG) {
1861 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1862 if (!(p1[pindex].flags & PAGE_WRITE)) {
1863 mprotect((void *)g2h(host_start), qemu_host_page_size,
1864 (prot & PAGE_BITS) | PAGE_WRITE);
1865 p1[pindex].flags |= PAGE_WRITE;
1866 /* and since the content will be modified, we must invalidate
1867 the corresponding translated code. */
1868 tb_invalidate_phys_page(address, pc, puc);
1869 #ifdef DEBUG_TB_CHECK
1870 tb_invalidate_check(address);
1871 #endif
1872 return 1;
1875 return 0;
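/* Usage sketch (illustrative, not part of the original source): in
   user-mode emulation the host SIGSEGV handler is the expected caller;
   the wrapper name below is hypothetical. */
#if 0
static int example_handle_host_write_fault(target_ulong guest_addr,
                                           unsigned long pc, void *puc)
{
    /* A non-zero result means the fault came only from the write
       protection guarding translated code: the code was invalidated,
       PAGE_WRITE was restored, and the guest write can be restarted. */
    return page_unprotect(guest_addr, pc, puc);
}
#endif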
1878 /* call this function when system calls directly modify a memory area */
1879 /* ??? This should be redundant now that we have lock_user. */
1880 void page_unprotect_range(target_ulong data, target_ulong data_size)
1882 target_ulong start, end, addr;
1884 start = data;
1885 end = start + data_size;
1886 start &= TARGET_PAGE_MASK;
1887 end = TARGET_PAGE_ALIGN(end);
1888 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1889 page_unprotect(addr, 0, NULL);
1893 static inline void tlb_set_dirty(CPUState *env,
1894 unsigned long addr, target_ulong vaddr)
1897 #endif /* defined(CONFIG_USER_ONLY) */
1899 /* register physical memory. 'size' must be a multiple of the target
1900 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1901 io memory page */
1902 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1903 unsigned long size,
1904 unsigned long phys_offset)
1906 target_phys_addr_t addr, end_addr;
1907 PhysPageDesc *p;
1908 CPUState *env;
1910 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1911 end_addr = start_addr + size;
1912 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1913 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1914 p->phys_offset = phys_offset;
1915 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1916 (phys_offset & IO_MEM_ROMD))
1917 phys_offset += TARGET_PAGE_SIZE;
1920 /* since each CPU stores ram addresses in its TLB cache, we must
1921 reset the modified entries */
1922 /* XXX: slow ! */
1923 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1924 tlb_flush(env, 1);
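/* Usage sketch (illustrative, not part of the original source): board code
   typically registers plain RAM using an offset from qemu_ram_alloc() and
   registers MMIO using the value returned by cpu_register_io_memory(),
   e.g. (addresses, sizes and io_index are hypothetical placeholders):

       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(mmio_base, 0x1000, io_index);
*/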
1928 /* XXX: temporary until new memory mapping API */
1929 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1931 PhysPageDesc *p;
1933 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1934 if (!p)
1935 return IO_MEM_UNASSIGNED;
1936 return p->phys_offset;
1939 /* XXX: better than nothing */
1940 ram_addr_t qemu_ram_alloc(unsigned int size)
1942 ram_addr_t addr;
1943 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1944 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1945 size, phys_ram_size);
1946 abort();
1948 addr = phys_ram_alloc_offset;
1949 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1950 return addr;
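/* Example (illustrative, not part of the original source): the returned
   value is an offset into guest RAM, not a host pointer; code that needs
   the host view adds phys_ram_base, e.g. (size hypothetical):

       ram_addr_t off = qemu_ram_alloc(0x100000);
       uint8_t *host = phys_ram_base + off;
*/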
1953 void qemu_ram_free(ram_addr_t addr)
1957 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1959 #ifdef DEBUG_UNASSIGNED
1960 printf("Unassigned mem read 0x%08x\n", (int)addr);
1961 #endif
1962 return 0;
1965 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1967 #ifdef DEBUG_UNASSIGNED
1968 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1969 #endif
1972 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1973 unassigned_mem_readb,
1974 unassigned_mem_readb,
1975 unassigned_mem_readb,
1978 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1979 unassigned_mem_writeb,
1980 unassigned_mem_writeb,
1981 unassigned_mem_writeb,
/* Writes to RAM pages that still contain translated code are routed
   through the IO_MEM_NOTDIRTY slot (see io_mem_init() below). The
   handlers invalidate any translated code on the page, perform the
   store, update the dirty bitmap and, once the page is fully dirty,
   switch the TLB entry back to a plain RAM mapping. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in the is_ram case. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
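
/* Illustrative sketch: how a debugger front end could arm a data watchpoint
   so that check_watchpoint() above fires. Only the CPUState fields used by
   check_watchpoint() (nb_watchpoints, watchpoint[].vaddr, watchpoint_hit)
   are taken from this file; the helper itself and the assumption of a free
   slot are hypothetical. */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    int i = env->nb_watchpoints++;      /* assumes a free watchpoint slot */
    env->watchpoint[i].vaddr = vaddr;   /* virtual address to watch */
    /* once the TLB entries covering vaddr are refilled through the
       io_mem_watch slot, a guest store to vaddr reaches watch_mem_write*,
       and check_watchpoint() records watchpoint_hit = i + 1 and raises
       CPU_INTERRUPT_DEBUG */
}
#endif
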
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* the first entries stay reserved for the fixed RAM/ROM/unassigned/
       notdirty handlers registered above; dynamic registrations start at
       index 5 */
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned in case of error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
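
/* Illustrative sketch: minimal registration of a memory-mapped device with
   cpu_register_io_memory(). The mydev_* callbacks, the 0xfe000000 base
   address and the exact signature assumed for cpu_register_physical_memory()
   are hypothetical and only meant to show the intended calling pattern. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,   /* byte, word, dword accessors */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void mydev_map(void *opaque)
{
    /* io_index 0 asks for a fresh slot; the shifted return value is the
       phys_offset understood by the softmmu TLB code */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif
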
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
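
/* Illustrative sketch: typical device/DMA style use of the slow path above,
   via the cpu_physical_memory_read/write wrappers already used elsewhere in
   this file. The guest descriptor address, its 8-byte layout and the status
   byte are purely hypothetical. */
#if 0
static void example_dma_copy(target_phys_addr_t desc_addr)
{
    uint8_t desc[8];

    /* fetch an 8-byte descriptor from guest physical memory */
    cpu_physical_memory_read(desc_addr, desc, 8);

    /* ... interpret desc ..., then write a completion status back */
    desc[7] = 0x01;
    cpu_physical_memory_write(desc_addr, desc, 8);
}
#endif
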
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
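
/* Illustrative sketch: how board setup code might copy a firmware image into
   a region that is read-only for the guest. The image buffer and the
   0xfffc0000 base address are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *image, int image_size)
{
    /* bypasses the ROM write protection that cpu_physical_memory_rw()
       enforces, while still skipping unassigned/MMIO pages */
    cpu_physical_memory_write_rom(0xfffc0000, image, image_size);
}
#endif
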
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
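
/* Illustrative sketch: the kind of caller the comment above has in mind.
   A target MMU helper sets accessed/dirty bits in a guest PTE without
   dirtying the RAM page, so the dirty bitmap keeps reflecting guest-initiated
   PTE writes only. PTE_ACCESSED/PTE_DIRTY and the PTE layout are
   hypothetical, target-specific details. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= PTE_ACCESSED;
    if (is_write)
        pte |= PTE_DIRTY;
    stl_phys_notdirty(pte_addr, pte);
}
#endif
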
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
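
/* Illustrative sketch: the fixed-width accessors above are what device models
   typically use for small target-endian fields in guest memory. The 0x2000
   ring address and the head/tail layout are hypothetical. */
#if 0
static void example_ring_update(void)
{
    target_phys_addr_t ring = 0x2000;   /* hypothetical guest structure */
    uint32_t head = ldl_phys(ring);     /* 32-bit target-endian load */
    uint32_t tail = ldl_phys(ring + 4);

    if (head != tail) {
        /* consume one entry, then publish the new head index */
        stl_phys(ring, (head + 1) & 0xff);
    }
}
#endif
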
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
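
/* Illustrative sketch: a debugger-style caller. A gdb stub or monitor command
   can read guest *virtual* memory because cpu_memory_rw_debug() translates
   each page via cpu_get_phys_page_debug(). The helper name and the probed
   address are hypothetical. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;              /* page not mapped in the guest MMU */
    *out = ldl_p(buf);          /* interpret in target byte order */
    return 0;
}
#endif
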
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
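
/* Illustrative sketch: dump_exec_info() only formats through the callback,
   so any fprintf-compatible printer works; a minimal standalone invocation
   dumping the translation statistics to stderr would be: */
#if 0
    dump_exec_info(stderr, fprintf);
#endif
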
#if !defined(CONFIG_USER_ONLY)

/* Instantiate the code-access softmmu helpers: each inclusion of
   softmmu_template.h with SHIFT = 0..3 generates the 1, 2, 4 and 8 byte
   access routines, suffixed with MMUSUFFIX (_cmmu here) and restricted to
   code fetches by SOFTMMU_CODE_ACCESS. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif