Remove suboptimal code from qemu DCR handlers for powerpc
[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #ifdef USE_KVM
39 #include "dyngen.h"
40 #include "qemu-kvm.h"
41 #endif
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #endif
46 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_FLUSH
48 //#define DEBUG_TLB
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
60 #undef DEBUG_TB_CHECK
61 #endif
63 /* threshold to flush the translated code buffer */
64 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
66 #define SMC_BITMAP_USE_THRESHOLD 10
68 #define MMAP_AREA_START 0x00000000
69 #define MMAP_AREA_END 0xa8000000
71 #if defined(TARGET_SPARC64)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 41
73 #elif defined(TARGET_SPARC)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 36
75 #elif defined(TARGET_ALPHA)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #define TARGET_VIRT_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_PPC64)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif USE_KQEMU
81 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 #elif TARGET_X86_64
84 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #elif defined(TARGET_IA64)
86 #define TARGET_PHYS_ADDR_SPACE_BITS 36
87 #else
88 #define TARGET_PHYS_ADDR_SPACE_BITS 32
89 #endif
91 #ifdef USE_KVM
92 extern int kvm_allowed;
93 extern kvm_context_t kvm_context;
94 #endif
96 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
97 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
98 int nb_tbs;
99 /* any access to the tbs or the page table must use this lock */
100 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
103 uint8_t *code_gen_ptr;
105 ram_addr_t phys_ram_size;
106 int phys_ram_fd;
107 uint8_t *phys_ram_base;
108 uint8_t *phys_ram_dirty;
109 uint8_t *bios_mem;
110 static int in_migration;
111 static ram_addr_t phys_ram_alloc_offset = 0;
113 CPUState *first_cpu;
114 /* current CPU in the current thread. It is only valid inside
115 cpu_exec() */
116 CPUState *cpu_single_env;
118 typedef struct PageDesc {
119 /* list of TBs intersecting this ram page */
120 TranslationBlock *first_tb;
121 /* in order to optimize self modifying code, we count the number
122 of write accesses to a given page; past a threshold we build a bitmap */
123 unsigned int code_write_count;
124 uint8_t *code_bitmap;
125 #if defined(CONFIG_USER_ONLY)
126 unsigned long flags;
127 #endif
128 } PageDesc;
130 typedef struct PhysPageDesc {
131 /* offset in host memory of the page + io_index in the low 12 bits */
132 ram_addr_t phys_offset;
133 } PhysPageDesc;
135 #define L2_BITS 10
136 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
137 /* XXX: this is a temporary hack for alpha target.
138 * In the future, this is to be replaced by a multi-level table
139 * to actually be able to handle the complete 64 bits address space.
141 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
142 #else
143 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
144 #endif
146 #define L1_SIZE (1 << L1_BITS)
147 #define L2_SIZE (1 << L2_BITS)
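/* For example (an illustrative assumption, not target-specific): with 4 KB
   target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10, a 32-bit virtual
   address splits into a 10-bit L1 index, a 10-bit L2 index and a 12-bit
   offset within the page described by the PageDesc. */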
149 static void io_mem_init(void);
151 unsigned long qemu_real_host_page_size;
152 unsigned long qemu_host_page_bits;
153 unsigned long qemu_host_page_size;
154 unsigned long qemu_host_page_mask;
156 /* XXX: for system emulation, it could just be an array */
157 static PageDesc *l1_map[L1_SIZE];
158 PhysPageDesc **l1_phys_map;
160 /* io memory support */
161 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
162 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
163 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
164 static int io_mem_nb;
165 #if defined(CONFIG_SOFTMMU)
166 static int io_mem_watch;
167 #endif
169 /* log support */
170 char *logfilename = "/tmp/qemu.log";
171 FILE *logfile;
172 int loglevel;
173 static int log_append = 0;
175 /* statistics */
176 static int tlb_flush_count;
177 static int tb_flush_count;
178 static int tb_phys_invalidate_count;
180 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
181 typedef struct subpage_t {
182 target_phys_addr_t base;
183 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
184 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
185 void *opaque[TARGET_PAGE_SIZE][2][4];
186 } subpage_t;
188 static void page_init(void)
190 /* NOTE: we can always suppose that qemu_host_page_size >=
191 TARGET_PAGE_SIZE */
192 #ifdef _WIN32
194 SYSTEM_INFO system_info;
195 DWORD old_protect;
197 GetSystemInfo(&system_info);
198 qemu_real_host_page_size = system_info.dwPageSize;
200 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
201 PAGE_EXECUTE_READWRITE, &old_protect);
203 #else
204 qemu_real_host_page_size = getpagesize();
206 unsigned long start, end;
208 start = (unsigned long)code_gen_buffer;
209 start &= ~(qemu_real_host_page_size - 1);
211 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
212 end += qemu_real_host_page_size - 1;
213 end &= ~(qemu_real_host_page_size - 1);
215 mprotect((void *)start, end - start,
216 PROT_READ | PROT_WRITE | PROT_EXEC);
218 #endif
220 if (qemu_host_page_size == 0)
221 qemu_host_page_size = qemu_real_host_page_size;
222 if (qemu_host_page_size < TARGET_PAGE_SIZE)
223 qemu_host_page_size = TARGET_PAGE_SIZE;
224 qemu_host_page_bits = 0;
225 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
226 qemu_host_page_bits++;
227 qemu_host_page_mask = ~(qemu_host_page_size - 1);
228 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
229 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
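/* User-mode only (the block below): every range already mapped in the host
   process, as reported by /proc/self/maps, is marked PAGE_RESERVED so that
   target mappings cannot be placed on top of host libraries or the stack. */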
231 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
233 long long startaddr, endaddr;
234 FILE *f;
235 int n;
237 f = fopen("/proc/self/maps", "r");
238 if (f) {
239 do {
240 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
241 if (n == 2) {
242 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
243 TARGET_PAGE_ALIGN(endaddr),
244 PAGE_RESERVED);
246 } while (!feof(f));
247 fclose(f);
250 #endif
253 static inline PageDesc *page_find_alloc(unsigned int index)
255 PageDesc **lp, *p;
257 lp = &l1_map[index >> L2_BITS];
258 p = *lp;
259 if (!p) {
260 /* allocate if not found */
261 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
262 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
263 *lp = p;
265 return p + (index & (L2_SIZE - 1));
268 static inline PageDesc *page_find(unsigned int index)
270 PageDesc *p;
272 p = l1_map[index >> L2_BITS];
273 if (!p)
274 return 0;
275 return p + (index & (L2_SIZE - 1));
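/* Look up (and optionally allocate) the PhysPageDesc for a physical page
   index. l1_phys_map is a single-level table for 32-bit physical address
   spaces and grows an extra level when TARGET_PHYS_ADDR_SPACE_BITS > 32. */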
278 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
280 void **lp, **p;
281 PhysPageDesc *pd;
283 p = (void **)l1_phys_map;
284 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
286 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
287 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
288 #endif
289 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
290 p = *lp;
291 if (!p) {
292 /* allocate if not found */
293 if (!alloc)
294 return NULL;
295 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
296 memset(p, 0, sizeof(void *) * L1_SIZE);
297 *lp = p;
299 #endif
300 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
301 pd = *lp;
302 if (!pd) {
303 int i;
304 /* allocate if not found */
305 if (!alloc)
306 return NULL;
307 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
308 *lp = pd;
309 for (i = 0; i < L2_SIZE; i++)
310 pd[i].phys_offset = IO_MEM_UNASSIGNED;
312 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
315 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
317 return phys_page_find_alloc(index, 0);
320 #if !defined(CONFIG_USER_ONLY)
321 static void tlb_protect_code(ram_addr_t ram_addr);
322 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
323 target_ulong vaddr);
324 #endif
326 void cpu_exec_init(CPUState *env)
328 CPUState **penv;
329 int cpu_index;
331 if (!code_gen_ptr) {
332 code_gen_ptr = code_gen_buffer;
333 page_init();
334 io_mem_init();
336 env->next_cpu = NULL;
337 penv = &first_cpu;
338 cpu_index = 0;
339 while (*penv != NULL) {
340 penv = (CPUState **)&(*penv)->next_cpu;
341 cpu_index++;
343 env->cpu_index = cpu_index;
344 env->nb_watchpoints = 0;
345 *penv = env;
348 static inline void invalidate_page_bitmap(PageDesc *p)
350 if (p->code_bitmap) {
351 qemu_free(p->code_bitmap);
352 p->code_bitmap = NULL;
354 p->code_write_count = 0;
357 /* set to NULL all the 'first_tb' fields in all PageDescs */
358 static void page_flush_tb(void)
360 int i, j;
361 PageDesc *p;
363 for(i = 0; i < L1_SIZE; i++) {
364 p = l1_map[i];
365 if (p) {
366 for(j = 0; j < L2_SIZE; j++) {
367 p->first_tb = NULL;
368 invalidate_page_bitmap(p);
369 p++;
375 /* flush all the translation blocks */
376 /* XXX: tb_flush is currently not thread safe */
377 void tb_flush(CPUState *env1)
379 CPUState *env;
380 #if defined(DEBUG_FLUSH)
381 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
382 (unsigned long)(code_gen_ptr - code_gen_buffer),
383 nb_tbs, nb_tbs > 0 ?
384 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
385 #endif
386 nb_tbs = 0;
388 for(env = first_cpu; env != NULL; env = env->next_cpu) {
389 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
392 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
393 page_flush_tb();
395 code_gen_ptr = code_gen_buffer;
396 /* XXX: flush processor icache at this point if cache flush is
397 expensive */
398 tb_flush_count++;
401 #ifdef DEBUG_TB_CHECK
403 static void tb_invalidate_check(target_ulong address)
405 TranslationBlock *tb;
406 int i;
407 address &= TARGET_PAGE_MASK;
408 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
409 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
410 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
411 address >= tb->pc + tb->size)) {
412 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
413 address, (long)tb->pc, tb->size);
419 /* verify that all the pages have correct rights for code */
420 static void tb_page_check(void)
422 TranslationBlock *tb;
423 int i, flags1, flags2;
425 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
426 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
427 flags1 = page_get_flags(tb->pc);
428 flags2 = page_get_flags(tb->pc + tb->size - 1);
429 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
430 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
431 (long)tb->pc, tb->size, flags1, flags2);
437 void tb_jmp_check(TranslationBlock *tb)
439 TranslationBlock *tb1;
440 unsigned int n1;
442 /* suppress any remaining jumps to this TB */
443 tb1 = tb->jmp_first;
444 for(;;) {
445 n1 = (long)tb1 & 3;
446 tb1 = (TranslationBlock *)((long)tb1 & ~3);
447 if (n1 == 2)
448 break;
449 tb1 = tb1->jmp_next[n1];
451 /* check end of list */
452 if (tb1 != tb) {
453 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
457 #endif
459 /* invalidate one TB */
460 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
461 int next_offset)
463 TranslationBlock *tb1;
464 for(;;) {
465 tb1 = *ptb;
466 if (tb1 == tb) {
467 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
468 break;
470 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
474 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
476 TranslationBlock *tb1;
477 unsigned int n1;
479 for(;;) {
480 tb1 = *ptb;
481 n1 = (long)tb1 & 3;
482 tb1 = (TranslationBlock *)((long)tb1 & ~3);
483 if (tb1 == tb) {
484 *ptb = tb1->page_next[n1];
485 break;
487 ptb = &tb1->page_next[n1];
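/* The TB lists above and below use tagged pointers: the low 2 bits of each
   link encode which slot (0 or 1) the next TB is chained through, and the
   value 2 marks the end of the list (for jump lists, the owning TB itself). */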
491 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
493 TranslationBlock *tb1, **ptb;
494 unsigned int n1;
496 ptb = &tb->jmp_next[n];
497 tb1 = *ptb;
498 if (tb1) {
499 /* find tb(n) in circular list */
500 for(;;) {
501 tb1 = *ptb;
502 n1 = (long)tb1 & 3;
503 tb1 = (TranslationBlock *)((long)tb1 & ~3);
504 if (n1 == n && tb1 == tb)
505 break;
506 if (n1 == 2) {
507 ptb = &tb1->jmp_first;
508 } else {
509 ptb = &tb1->jmp_next[n1];
512 /* now we can suppress tb(n) from the list */
513 *ptb = tb->jmp_next[n];
515 tb->jmp_next[n] = NULL;
519 /* reset the jump entry 'n' of a TB so that it is not chained to
520 another TB */
521 static inline void tb_reset_jump(TranslationBlock *tb, int n)
523 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
526 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
528 CPUState *env;
529 PageDesc *p;
530 unsigned int h, n1;
531 target_ulong phys_pc;
532 TranslationBlock *tb1, *tb2;
534 /* remove the TB from the hash list */
535 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
536 h = tb_phys_hash_func(phys_pc);
537 tb_remove(&tb_phys_hash[h], tb,
538 offsetof(TranslationBlock, phys_hash_next));
540 /* remove the TB from the page list */
541 if (tb->page_addr[0] != page_addr) {
542 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
543 tb_page_remove(&p->first_tb, tb);
544 invalidate_page_bitmap(p);
546 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
547 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
548 tb_page_remove(&p->first_tb, tb);
549 invalidate_page_bitmap(p);
552 tb_invalidated_flag = 1;
554 /* remove the TB from the hash list */
555 h = tb_jmp_cache_hash_func(tb->pc);
556 for(env = first_cpu; env != NULL; env = env->next_cpu) {
557 if (env->tb_jmp_cache[h] == tb)
558 env->tb_jmp_cache[h] = NULL;
561 /* suppress this TB from the two jump lists */
562 tb_jmp_remove(tb, 0);
563 tb_jmp_remove(tb, 1);
565 /* suppress any remaining jumps to this TB */
566 tb1 = tb->jmp_first;
567 for(;;) {
568 n1 = (long)tb1 & 3;
569 if (n1 == 2)
570 break;
571 tb1 = (TranslationBlock *)((long)tb1 & ~3);
572 tb2 = tb1->jmp_next[n1];
573 tb_reset_jump(tb1, n1);
574 tb1->jmp_next[n1] = NULL;
575 tb1 = tb2;
577 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
579 tb_phys_invalidate_count++;
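/* Set the bits [start, start + len) in the bitmap 'tab'; used below to mark
   which bytes of a page are covered by translated code. */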
582 static inline void set_bits(uint8_t *tab, int start, int len)
584 int end, mask, end1;
586 end = start + len;
587 tab += start >> 3;
588 mask = 0xff << (start & 7);
589 if ((start & ~7) == (end & ~7)) {
590 if (start < end) {
591 mask &= ~(0xff << (end & 7));
592 *tab |= mask;
594 } else {
595 *tab++ |= mask;
596 start = (start + 8) & ~7;
597 end1 = end & ~7;
598 while (start < end1) {
599 *tab++ = 0xff;
600 start += 8;
602 if (start < end) {
603 mask = ~(0xff << (end & 7));
604 *tab |= mask;
609 static void build_page_bitmap(PageDesc *p)
611 int n, tb_start, tb_end;
612 TranslationBlock *tb;
614 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
615 if (!p->code_bitmap)
616 return;
617 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
619 tb = p->first_tb;
620 while (tb != NULL) {
621 n = (long)tb & 3;
622 tb = (TranslationBlock *)((long)tb & ~3);
623 /* NOTE: this is subtle as a TB may span two physical pages */
624 if (n == 0) {
625 /* NOTE: tb_end may be after the end of the page, but
626 it is not a problem */
627 tb_start = tb->pc & ~TARGET_PAGE_MASK;
628 tb_end = tb_start + tb->size;
629 if (tb_end > TARGET_PAGE_SIZE)
630 tb_end = TARGET_PAGE_SIZE;
631 } else {
632 tb_start = 0;
633 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
635 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
636 tb = tb->page_next[n];
640 #ifdef TARGET_HAS_PRECISE_SMC
642 static void tb_gen_code(CPUState *env,
643 target_ulong pc, target_ulong cs_base, int flags,
644 int cflags)
646 TranslationBlock *tb;
647 uint8_t *tc_ptr;
648 target_ulong phys_pc, phys_page2, virt_page2;
649 int code_gen_size;
651 phys_pc = get_phys_addr_code(env, pc);
652 tb = tb_alloc(pc);
653 if (!tb) {
654 /* flush must be done */
655 tb_flush(env);
656 /* cannot fail at this point */
657 tb = tb_alloc(pc);
659 tc_ptr = code_gen_ptr;
660 tb->tc_ptr = tc_ptr;
661 tb->cs_base = cs_base;
662 tb->flags = flags;
663 tb->cflags = cflags;
664 cpu_gen_code(env, tb, &code_gen_size);
665 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
667 /* check next page if needed */
668 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
669 phys_page2 = -1;
670 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
671 phys_page2 = get_phys_addr_code(env, virt_page2);
673 tb_link_phys(tb, phys_pc, phys_page2);
675 #endif
677 /* invalidate all TBs which intersect with the target physical page
678 starting in range [start;end[. NOTE: start and end must refer to
679 the same physical page. 'is_cpu_write_access' should be true if called
680 from a real cpu write access: the virtual CPU will exit the current
681 TB if code is modified inside this TB. */
682 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
683 int is_cpu_write_access)
685 int n, current_tb_modified, current_tb_not_found, current_flags;
686 CPUState *env = cpu_single_env;
687 PageDesc *p;
688 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
689 target_ulong tb_start, tb_end;
690 target_ulong current_pc, current_cs_base;
692 p = page_find(start >> TARGET_PAGE_BITS);
693 if (!p)
694 return;
695 if (!p->code_bitmap &&
696 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
697 is_cpu_write_access) {
698 /* build code bitmap */
699 build_page_bitmap(p);
702 /* we remove all the TBs in the range [start, end[ */
703 /* XXX: see if in some cases it could be faster to invalidate all the code */
704 current_tb_not_found = is_cpu_write_access;
705 current_tb_modified = 0;
706 current_tb = NULL; /* avoid warning */
707 current_pc = 0; /* avoid warning */
708 current_cs_base = 0; /* avoid warning */
709 current_flags = 0; /* avoid warning */
710 tb = p->first_tb;
711 while (tb != NULL) {
712 n = (long)tb & 3;
713 tb = (TranslationBlock *)((long)tb & ~3);
714 tb_next = tb->page_next[n];
715 /* NOTE: this is subtle as a TB may span two physical pages */
716 if (n == 0) {
717 /* NOTE: tb_end may be after the end of the page, but
718 it is not a problem */
719 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
720 tb_end = tb_start + tb->size;
721 } else {
722 tb_start = tb->page_addr[1];
723 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
725 if (!(tb_end <= start || tb_start >= end)) {
726 #ifdef TARGET_HAS_PRECISE_SMC
727 if (current_tb_not_found) {
728 current_tb_not_found = 0;
729 current_tb = NULL;
730 if (env->mem_write_pc) {
731 /* now we have a real cpu fault */
732 current_tb = tb_find_pc(env->mem_write_pc);
735 if (current_tb == tb &&
736 !(current_tb->cflags & CF_SINGLE_INSN)) {
737 /* If we are modifying the current TB, we must stop
738 its execution. We could be more precise by checking
739 that the modification is after the current PC, but it
740 would require a specialized function to partially
741 restore the CPU state */
743 current_tb_modified = 1;
744 cpu_restore_state(current_tb, env,
745 env->mem_write_pc, NULL);
746 #if defined(TARGET_I386)
747 current_flags = env->hflags;
748 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
749 current_cs_base = (target_ulong)env->segs[R_CS].base;
750 current_pc = current_cs_base + env->eip;
751 #else
752 #error unsupported CPU
753 #endif
755 #endif /* TARGET_HAS_PRECISE_SMC */
756 /* we need to do that to handle the case where a signal
757 occurs while doing tb_phys_invalidate() */
758 saved_tb = NULL;
759 if (env) {
760 saved_tb = env->current_tb;
761 env->current_tb = NULL;
763 tb_phys_invalidate(tb, -1);
764 if (env) {
765 env->current_tb = saved_tb;
766 if (env->interrupt_request && env->current_tb)
767 cpu_interrupt(env, env->interrupt_request);
770 tb = tb_next;
772 #if !defined(CONFIG_USER_ONLY)
773 /* if no code remaining, no need to continue to use slow writes */
774 if (!p->first_tb) {
775 invalidate_page_bitmap(p);
776 if (is_cpu_write_access) {
777 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
780 #endif
781 #ifdef TARGET_HAS_PRECISE_SMC
782 if (current_tb_modified) {
783 /* we generate a block containing just the instruction
784 modifying the memory. It will ensure that it cannot modify
785 itself */
786 env->current_tb = NULL;
787 tb_gen_code(env, current_pc, current_cs_base, current_flags,
788 CF_SINGLE_INSN);
789 cpu_resume_from_signal(env, NULL);
791 #endif
794 /* len must be <= 8 and start must be a multiple of len */
795 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
797 PageDesc *p;
798 int offset, b;
799 #if 0
800 if (1) {
801 if (loglevel) {
802 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
803 cpu_single_env->mem_write_vaddr, len,
804 cpu_single_env->eip,
805 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
808 #endif
809 p = page_find(start >> TARGET_PAGE_BITS);
810 if (!p)
811 return;
812 if (p->code_bitmap) {
813 offset = start & ~TARGET_PAGE_MASK;
814 b = p->code_bitmap[offset >> 3] >> (offset & 7);
815 if (b & ((1 << len) - 1))
816 goto do_invalidate;
817 } else {
818 do_invalidate:
819 tb_invalidate_phys_page_range(start, start + len, 1);
823 #if !defined(CONFIG_SOFTMMU)
824 static void tb_invalidate_phys_page(target_ulong addr,
825 unsigned long pc, void *puc)
827 int n, current_flags, current_tb_modified;
828 target_ulong current_pc, current_cs_base;
829 PageDesc *p;
830 TranslationBlock *tb, *current_tb;
831 #ifdef TARGET_HAS_PRECISE_SMC
832 CPUState *env = cpu_single_env;
833 #endif
835 addr &= TARGET_PAGE_MASK;
836 p = page_find(addr >> TARGET_PAGE_BITS);
837 if (!p)
838 return;
839 tb = p->first_tb;
840 current_tb_modified = 0;
841 current_tb = NULL;
842 current_pc = 0; /* avoid warning */
843 current_cs_base = 0; /* avoid warning */
844 current_flags = 0; /* avoid warning */
845 #ifdef TARGET_HAS_PRECISE_SMC
846 if (tb && pc != 0) {
847 current_tb = tb_find_pc(pc);
849 #endif
850 while (tb != NULL) {
851 n = (long)tb & 3;
852 tb = (TranslationBlock *)((long)tb & ~3);
853 #ifdef TARGET_HAS_PRECISE_SMC
854 if (current_tb == tb &&
855 !(current_tb->cflags & CF_SINGLE_INSN)) {
856 /* If we are modifying the current TB, we must stop
857 its execution. We could be more precise by checking
858 that the modification is after the current PC, but it
859 would require a specialized function to partially
860 restore the CPU state */
862 current_tb_modified = 1;
863 cpu_restore_state(current_tb, env, pc, puc);
864 #if defined(TARGET_I386)
865 current_flags = env->hflags;
866 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
867 current_cs_base = (target_ulong)env->segs[R_CS].base;
868 current_pc = current_cs_base + env->eip;
869 #else
870 #error unsupported CPU
871 #endif
873 #endif /* TARGET_HAS_PRECISE_SMC */
874 tb_phys_invalidate(tb, addr);
875 tb = tb->page_next[n];
877 p->first_tb = NULL;
878 #ifdef TARGET_HAS_PRECISE_SMC
879 if (current_tb_modified) {
880 /* we generate a block containing just the instruction
881 modifying the memory. It will ensure that it cannot modify
882 itself */
883 env->current_tb = NULL;
884 tb_gen_code(env, current_pc, current_cs_base, current_flags,
885 CF_SINGLE_INSN);
886 cpu_resume_from_signal(env, puc);
888 #endif
890 #endif
892 /* add the tb to the target page and protect it if necessary */
893 static inline void tb_alloc_page(TranslationBlock *tb,
894 unsigned int n, target_ulong page_addr)
896 PageDesc *p;
897 TranslationBlock *last_first_tb;
899 tb->page_addr[n] = page_addr;
900 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
901 tb->page_next[n] = p->first_tb;
902 last_first_tb = p->first_tb;
903 p->first_tb = (TranslationBlock *)((long)tb | n);
904 invalidate_page_bitmap(p);
906 #if defined(TARGET_HAS_SMC) || 1
908 #if defined(CONFIG_USER_ONLY)
909 if (p->flags & PAGE_WRITE) {
910 target_ulong addr;
911 PageDesc *p2;
912 int prot;
914 /* force the host page as non writable (writes will have a
915 page fault + mprotect overhead) */
916 page_addr &= qemu_host_page_mask;
917 prot = 0;
918 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
919 addr += TARGET_PAGE_SIZE) {
921 p2 = page_find (addr >> TARGET_PAGE_BITS);
922 if (!p2)
923 continue;
924 prot |= p2->flags;
925 p2->flags &= ~PAGE_WRITE;
926 page_get_flags(addr);
928 mprotect(g2h(page_addr), qemu_host_page_size,
929 (prot & PAGE_BITS) & ~PAGE_WRITE);
930 #ifdef DEBUG_TB_INVALIDATE
931 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
932 page_addr);
933 #endif
935 #else
936 /* if some code is already present, then the pages are already
937 protected. So we handle the case where only the first TB is
938 allocated in a physical page */
939 if (!last_first_tb) {
940 tlb_protect_code(page_addr);
942 #endif
944 #endif /* TARGET_HAS_SMC */
947 /* Allocate a new translation block. Flush the translation buffer if
948 too many translation blocks or too much generated code. */
949 TranslationBlock *tb_alloc(target_ulong pc)
951 TranslationBlock *tb;
953 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
954 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
955 return NULL;
956 tb = &tbs[nb_tbs++];
957 tb->pc = pc;
958 tb->cflags = 0;
959 return tb;
962 /* add a new TB and link it to the physical page tables. phys_page2 is
963 (-1) to indicate that only one page contains the TB. */
964 void tb_link_phys(TranslationBlock *tb,
965 target_ulong phys_pc, target_ulong phys_page2)
967 unsigned int h;
968 TranslationBlock **ptb;
970 /* add in the physical hash table */
971 h = tb_phys_hash_func(phys_pc);
972 ptb = &tb_phys_hash[h];
973 tb->phys_hash_next = *ptb;
974 *ptb = tb;
976 /* add in the page list */
977 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
978 if (phys_page2 != -1)
979 tb_alloc_page(tb, 1, phys_page2);
980 else
981 tb->page_addr[1] = -1;
983 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
984 tb->jmp_next[0] = NULL;
985 tb->jmp_next[1] = NULL;
987 /* init original jump addresses */
988 if (tb->tb_next_offset[0] != 0xffff)
989 tb_reset_jump(tb, 0);
990 if (tb->tb_next_offset[1] != 0xffff)
991 tb_reset_jump(tb, 1);
993 #ifdef DEBUG_TB_CHECK
994 tb_page_check();
995 #endif
998 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
999 tb[1].tc_ptr. Return NULL if not found */
1000 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1002 int m_min, m_max, m;
1003 unsigned long v;
1004 TranslationBlock *tb;
1006 if (nb_tbs <= 0)
1007 return NULL;
1008 if (tc_ptr < (unsigned long)code_gen_buffer ||
1009 tc_ptr >= (unsigned long)code_gen_ptr)
1010 return NULL;
1011 /* binary search (cf Knuth) */
1012 m_min = 0;
1013 m_max = nb_tbs - 1;
1014 while (m_min <= m_max) {
1015 m = (m_min + m_max) >> 1;
1016 tb = &tbs[m];
1017 v = (unsigned long)tb->tc_ptr;
1018 if (v == tc_ptr)
1019 return tb;
1020 else if (tc_ptr < v) {
1021 m_max = m - 1;
1022 } else {
1023 m_min = m + 1;
1026 return &tbs[m_max];
1029 static void tb_reset_jump_recursive(TranslationBlock *tb);
1031 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1033 TranslationBlock *tb1, *tb_next, **ptb;
1034 unsigned int n1;
1036 tb1 = tb->jmp_next[n];
1037 if (tb1 != NULL) {
1038 /* find head of list */
1039 for(;;) {
1040 n1 = (long)tb1 & 3;
1041 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1042 if (n1 == 2)
1043 break;
1044 tb1 = tb1->jmp_next[n1];
1046 /* we are now sure that tb jumps to tb1 */
1047 tb_next = tb1;
1049 /* remove tb from the jmp_first list */
1050 ptb = &tb_next->jmp_first;
1051 for(;;) {
1052 tb1 = *ptb;
1053 n1 = (long)tb1 & 3;
1054 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1055 if (n1 == n && tb1 == tb)
1056 break;
1057 ptb = &tb1->jmp_next[n1];
1059 *ptb = tb->jmp_next[n];
1060 tb->jmp_next[n] = NULL;
1062 /* suppress the jump to next tb in generated code */
1063 tb_reset_jump(tb, n);
1065 /* suppress jumps in the tb on which we could have jumped */
1066 tb_reset_jump_recursive(tb_next);
1070 static void tb_reset_jump_recursive(TranslationBlock *tb)
1072 tb_reset_jump_recursive2(tb, 0);
1073 tb_reset_jump_recursive2(tb, 1);
1076 #if defined(TARGET_HAS_ICE)
1077 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1079 target_phys_addr_t addr;
1080 target_ulong pd;
1081 ram_addr_t ram_addr;
1082 PhysPageDesc *p;
1084 addr = cpu_get_phys_page_debug(env, pc);
1085 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1086 if (!p) {
1087 pd = IO_MEM_UNASSIGNED;
1088 } else {
1089 pd = p->phys_offset;
1091 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1092 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1094 #endif
1096 /* Add a watchpoint. */
1097 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1099 int i;
1101 for (i = 0; i < env->nb_watchpoints; i++) {
1102 if (addr == env->watchpoint[i].vaddr)
1103 return 0;
1105 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1106 return -1;
1108 i = env->nb_watchpoints++;
1109 env->watchpoint[i].vaddr = addr;
1110 tlb_flush_page(env, addr);
1111 /* FIXME: This flush is needed because of the hack to make memory ops
1112 terminate the TB. It can be removed once the proper IO trap and
1113 re-execute bits are in. */
1114 tb_flush(env);
1115 return i;
1118 /* Remove a watchpoint. */
1119 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1121 int i;
1123 for (i = 0; i < env->nb_watchpoints; i++) {
1124 if (addr == env->watchpoint[i].vaddr) {
1125 env->nb_watchpoints--;
1126 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1127 tlb_flush_page(env, addr);
1128 return 0;
1131 return -1;
1134 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1135 breakpoint is reached */
1136 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1138 #if defined(TARGET_HAS_ICE)
1139 int i;
1141 for(i = 0; i < env->nb_breakpoints; i++) {
1142 if (env->breakpoints[i] == pc)
1143 return 0;
1146 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1147 return -1;
1148 env->breakpoints[env->nb_breakpoints++] = pc;
1150 #ifdef USE_KVM
1151 if (kvm_allowed)
1152 kvm_update_debugger(env);
1153 #endif
1155 breakpoint_invalidate(env, pc);
1156 return 0;
1157 #else
1158 return -1;
1159 #endif
1162 /* remove a breakpoint */
1163 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1165 #if defined(TARGET_HAS_ICE)
1166 int i;
1167 for(i = 0; i < env->nb_breakpoints; i++) {
1168 if (env->breakpoints[i] == pc)
1169 goto found;
1171 return -1;
1172 found:
1173 env->nb_breakpoints--;
1174 if (i < env->nb_breakpoints)
1175 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1177 #ifdef USE_KVM
1178 if (kvm_allowed)
1179 kvm_update_debugger(env);
1180 #endif
1182 breakpoint_invalidate(env, pc);
1183 return 0;
1184 #else
1185 return -1;
1186 #endif
1189 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1190 CPU loop after each instruction */
1191 void cpu_single_step(CPUState *env, int enabled)
1193 #if defined(TARGET_HAS_ICE)
1194 if (env->singlestep_enabled != enabled) {
1195 env->singlestep_enabled = enabled;
1196 /* must flush all the translated code to avoid inconsistencies */
1197 /* XXX: only flush what is necessary */
1198 tb_flush(env);
1200 #ifdef USE_KVM
1201 if (kvm_allowed)
1202 kvm_update_debugger(env);
1203 #endif
1204 #endif
1207 /* enable or disable low levels log */
1208 void cpu_set_log(int log_flags)
1210 loglevel = log_flags;
1211 if (loglevel && !logfile) {
1212 logfile = fopen(logfilename, log_append ? "a" : "w");
1213 if (!logfile) {
1214 perror(logfilename);
1215 _exit(1);
1217 #if !defined(CONFIG_SOFTMMU)
1218 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1220 static uint8_t logfile_buf[4096];
1221 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1223 #else
1224 setvbuf(logfile, NULL, _IOLBF, 0);
1225 #endif
1226 log_append = 1;
1228 if (!loglevel && logfile) {
1229 fclose(logfile);
1230 logfile = NULL;
1234 void cpu_set_log_filename(const char *filename)
1236 logfilename = strdup(filename);
1237 if (logfile) {
1238 fclose(logfile);
1239 logfile = NULL;
1241 cpu_set_log(loglevel);
1244 /* mask must never be zero, except for A20 change call */
1245 void cpu_interrupt(CPUState *env, int mask)
1247 TranslationBlock *tb;
1248 static int interrupt_lock;
1250 env->interrupt_request |= mask;
1251 #ifdef USE_KVM
1252 if (kvm_allowed && !kvm_irqchip_in_kernel(kvm_context))
1253 kvm_update_interrupt_request(env);
1254 #endif
1255 /* if the cpu is currently executing code, we must unlink it and
1256 all the potentially executing TB */
1257 tb = env->current_tb;
1258 if (tb && !testandset(&interrupt_lock)) {
1259 env->current_tb = NULL;
1260 tb_reset_jump_recursive(tb);
1261 interrupt_lock = 0;
1265 void cpu_reset_interrupt(CPUState *env, int mask)
1267 env->interrupt_request &= ~mask;
1270 CPULogItem cpu_log_items[] = {
1271 { CPU_LOG_TB_OUT_ASM, "out_asm",
1272 "show generated host assembly code for each compiled TB" },
1273 { CPU_LOG_TB_IN_ASM, "in_asm",
1274 "show target assembly code for each compiled TB" },
1275 { CPU_LOG_TB_OP, "op",
1276 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1277 #ifdef TARGET_I386
1278 { CPU_LOG_TB_OP_OPT, "op_opt",
1279 "show micro ops after optimization for each compiled TB" },
1280 #endif
1281 { CPU_LOG_INT, "int",
1282 "show interrupts/exceptions in short format" },
1283 { CPU_LOG_EXEC, "exec",
1284 "show trace before each executed TB (lots of logs)" },
1285 { CPU_LOG_TB_CPU, "cpu",
1286 "show CPU state before block translation" },
1287 #ifdef TARGET_I386
1288 { CPU_LOG_PCALL, "pcall",
1289 "show protected mode far calls/returns/exceptions" },
1290 #endif
1291 #ifdef DEBUG_IOPORT
1292 { CPU_LOG_IOPORT, "ioport",
1293 "show all i/o ports accesses" },
1294 #endif
1295 { 0, NULL, NULL },
1298 static int cmp1(const char *s1, int n, const char *s2)
1300 if (strlen(s2) != n)
1301 return 0;
1302 return memcmp(s1, s2, n) == 0;
1305 /* takes a comma separated list of log masks. Return 0 if error. */
1306 int cpu_str_to_log_mask(const char *str)
1308 CPULogItem *item;
1309 int mask;
1310 const char *p, *p1;
1312 p = str;
1313 mask = 0;
1314 for(;;) {
1315 p1 = strchr(p, ',');
1316 if (!p1)
1317 p1 = p + strlen(p);
1318 if(cmp1(p,p1-p,"all")) {
1319 for(item = cpu_log_items; item->mask != 0; item++) {
1320 mask |= item->mask;
1322 } else {
1323 for(item = cpu_log_items; item->mask != 0; item++) {
1324 if (cmp1(p, p1 - p, item->name))
1325 goto found;
1327 return 0;
1329 found:
1330 mask |= item->mask;
1331 if (*p1 != ',')
1332 break;
1333 p = p1 + 1;
1335 return mask;
1338 void cpu_abort(CPUState *env, const char *fmt, ...)
1340 va_list ap;
1341 va_list ap2;
1343 va_start(ap, fmt);
1344 va_copy(ap2, ap);
1345 fprintf(stderr, "qemu: fatal: ");
1346 vfprintf(stderr, fmt, ap);
1347 fprintf(stderr, "\n");
1348 #ifdef TARGET_I386
1349 if(env->intercept & INTERCEPT_SVM_MASK) {
1350 /* most probably the virtual machine should not
1351 be shut down but rather caught by the VMM */
1352 vmexit(SVM_EXIT_SHUTDOWN, 0);
1354 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1355 #else
1356 cpu_dump_state(env, stderr, fprintf, 0);
1357 #endif
1358 if (logfile) {
1359 fprintf(logfile, "qemu: fatal: ");
1360 vfprintf(logfile, fmt, ap2);
1361 fprintf(logfile, "\n");
1362 #ifdef TARGET_I386
1363 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1364 #else
1365 cpu_dump_state(env, logfile, fprintf, 0);
1366 #endif
1367 fflush(logfile);
1368 fclose(logfile);
1370 va_end(ap2);
1371 va_end(ap);
1372 abort();
1375 CPUState *cpu_copy(CPUState *env)
1377 CPUState *new_env = cpu_init(env->cpu_model_str);
1378 /* preserve chaining and index */
1379 CPUState *next_cpu = new_env->next_cpu;
1380 int cpu_index = new_env->cpu_index;
1381 memcpy(new_env, env, sizeof(CPUState));
1382 new_env->next_cpu = next_cpu;
1383 new_env->cpu_index = cpu_index;
1384 return new_env;
1387 #if !defined(CONFIG_USER_ONLY)
1389 /* NOTE: if flush_global is true, also flush global entries (not
1390 implemented yet) */
1391 void tlb_flush(CPUState *env, int flush_global)
1393 int i;
1395 #if defined(DEBUG_TLB)
1396 printf("tlb_flush:\n");
1397 #endif
1398 /* must reset current TB so that interrupts cannot modify the
1399 links while we are modifying them */
1400 env->current_tb = NULL;
1402 for(i = 0; i < CPU_TLB_SIZE; i++) {
1403 env->tlb_table[0][i].addr_read = -1;
1404 env->tlb_table[0][i].addr_write = -1;
1405 env->tlb_table[0][i].addr_code = -1;
1406 env->tlb_table[1][i].addr_read = -1;
1407 env->tlb_table[1][i].addr_write = -1;
1408 env->tlb_table[1][i].addr_code = -1;
1409 #if (NB_MMU_MODES >= 3)
1410 env->tlb_table[2][i].addr_read = -1;
1411 env->tlb_table[2][i].addr_write = -1;
1412 env->tlb_table[2][i].addr_code = -1;
1413 #if (NB_MMU_MODES == 4)
1414 env->tlb_table[3][i].addr_read = -1;
1415 env->tlb_table[3][i].addr_write = -1;
1416 env->tlb_table[3][i].addr_code = -1;
1417 #endif
1418 #endif
1421 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1423 #if !defined(CONFIG_SOFTMMU)
1424 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1425 #endif
1426 #ifdef USE_KQEMU
1427 if (env->kqemu_enabled) {
1428 kqemu_flush(env, flush_global);
1430 #endif
1431 tlb_flush_count++;
1434 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1436 if (addr == (tlb_entry->addr_read &
1437 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1438 addr == (tlb_entry->addr_write &
1439 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1440 addr == (tlb_entry->addr_code &
1441 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1442 tlb_entry->addr_read = -1;
1443 tlb_entry->addr_write = -1;
1444 tlb_entry->addr_code = -1;
1448 void tlb_flush_page(CPUState *env, target_ulong addr)
1450 int i;
1451 TranslationBlock *tb;
1453 #if defined(DEBUG_TLB)
1454 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1455 #endif
1456 /* must reset current TB so that interrupts cannot modify the
1457 links while we are modifying them */
1458 env->current_tb = NULL;
1460 addr &= TARGET_PAGE_MASK;
1461 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1462 tlb_flush_entry(&env->tlb_table[0][i], addr);
1463 tlb_flush_entry(&env->tlb_table[1][i], addr);
1464 #if (NB_MMU_MODES >= 3)
1465 tlb_flush_entry(&env->tlb_table[2][i], addr);
1466 #if (NB_MMU_MODES == 4)
1467 tlb_flush_entry(&env->tlb_table[3][i], addr);
1468 #endif
1469 #endif
1471 /* Discard jump cache entries for any tb which might potentially
1472 overlap the flushed page. */
1473 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1474 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1476 i = tb_jmp_cache_hash_page(addr);
1477 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1479 #if !defined(CONFIG_SOFTMMU)
1480 if (addr < MMAP_AREA_END)
1481 munmap((void *)addr, TARGET_PAGE_SIZE);
1482 #endif
1483 #ifdef USE_KQEMU
1484 if (env->kqemu_enabled) {
1485 kqemu_flush_page(env, addr);
1487 #endif
1490 /* update the TLBs so that writes to code in the virtual page 'addr'
1491 can be detected */
1492 static void tlb_protect_code(ram_addr_t ram_addr)
1494 cpu_physical_memory_reset_dirty(ram_addr,
1495 ram_addr + TARGET_PAGE_SIZE,
1496 CODE_DIRTY_FLAG);
1499 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1500 tested for self modifying code */
1501 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1502 target_ulong vaddr)
1504 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1507 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1508 unsigned long start, unsigned long length)
1510 unsigned long addr;
1511 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1512 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1513 if ((addr - start) < length) {
1514 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
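/* phys_ram_dirty holds one byte of dirty flags per target page. Clearing a
   flag below also redirects matching TLB write entries to IO_MEM_NOTDIRTY,
   so the next store to the page takes the slow path and sets the flag again. */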
1519 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1520 int dirty_flags)
1522 CPUState *env;
1523 unsigned long length, start1;
1524 int i, mask, len;
1525 uint8_t *p;
1527 start &= TARGET_PAGE_MASK;
1528 end = TARGET_PAGE_ALIGN(end);
1530 length = end - start;
1531 if (length == 0)
1532 return;
1533 len = length >> TARGET_PAGE_BITS;
1534 #ifdef USE_KQEMU
1535 /* XXX: should not depend on cpu context */
1536 env = first_cpu;
1537 if (env->kqemu_enabled) {
1538 ram_addr_t addr;
1539 addr = start;
1540 for(i = 0; i < len; i++) {
1541 kqemu_set_notdirty(env, addr);
1542 addr += TARGET_PAGE_SIZE;
1545 #endif
1546 mask = ~dirty_flags;
1547 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1548 for(i = 0; i < len; i++)
1549 p[i] &= mask;
1551 /* we modify the TLB cache so that the dirty bit will be set again
1552 when accessing the range */
1553 start1 = start + (unsigned long)phys_ram_base;
1554 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1555 for(i = 0; i < CPU_TLB_SIZE; i++)
1556 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1557 for(i = 0; i < CPU_TLB_SIZE; i++)
1558 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1559 #if (NB_MMU_MODES >= 3)
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1562 #if (NB_MMU_MODES == 4)
1563 for(i = 0; i < CPU_TLB_SIZE; i++)
1564 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1565 #endif
1566 #endif
1569 #if !defined(CONFIG_SOFTMMU)
1570 /* XXX: this is expensive */
1572 VirtPageDesc *p;
1573 int j;
1574 target_ulong addr;
1576 for(i = 0; i < L1_SIZE; i++) {
1577 p = l1_virt_map[i];
1578 if (p) {
1579 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1580 for(j = 0; j < L2_SIZE; j++) {
1581 if (p->valid_tag == virt_valid_tag &&
1582 p->phys_addr >= start && p->phys_addr < end &&
1583 (p->prot & PROT_WRITE)) {
1584 if (addr < MMAP_AREA_END) {
1585 mprotect((void *)addr, TARGET_PAGE_SIZE,
1586 p->prot & ~PROT_WRITE);
1589 addr += TARGET_PAGE_SIZE;
1590 p++;
1595 #endif
1598 int cpu_physical_memory_set_dirty_tracking(int enable)
1600 int r=0;
1602 #ifdef USE_KVM
1603 r = kvm_physical_memory_set_dirty_tracking(enable);
1604 #endif
1605 in_migration = enable;
1606 return r;
1609 int cpu_physical_memory_get_dirty_tracking(void)
1611 return in_migration;
1614 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1616 ram_addr_t ram_addr;
1618 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1619 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1620 tlb_entry->addend - (unsigned long)phys_ram_base;
1621 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1622 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1627 /* update the TLB according to the current state of the dirty bits */
1628 void cpu_tlb_update_dirty(CPUState *env)
1630 int i;
1631 for(i = 0; i < CPU_TLB_SIZE; i++)
1632 tlb_update_dirty(&env->tlb_table[0][i]);
1633 for(i = 0; i < CPU_TLB_SIZE; i++)
1634 tlb_update_dirty(&env->tlb_table[1][i]);
1635 #if (NB_MMU_MODES >= 3)
1636 for(i = 0; i < CPU_TLB_SIZE; i++)
1637 tlb_update_dirty(&env->tlb_table[2][i]);
1638 #if (NB_MMU_MODES == 4)
1639 for(i = 0; i < CPU_TLB_SIZE; i++)
1640 tlb_update_dirty(&env->tlb_table[3][i]);
1641 #endif
1642 #endif
1645 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1646 unsigned long start)
1648 unsigned long addr;
1649 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1650 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1651 if (addr == start) {
1652 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1657 /* update the TLB corresponding to virtual page vaddr and phys addr
1658 addr so that it is no longer dirty */
1659 static inline void tlb_set_dirty(CPUState *env,
1660 unsigned long addr, target_ulong vaddr)
1662 int i;
1664 addr &= TARGET_PAGE_MASK;
1665 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1666 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1667 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1668 #if (NB_MMU_MODES >= 3)
1669 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1670 #if (NB_MMU_MODES == 4)
1671 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1672 #endif
1673 #endif
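/* Rough sketch (an assumption about the softmmu templates, which live
   outside this file) of how the fields filled in below are consumed: if the
   page-aligned virtual address matches te->addr_read/addr_write, the access
   is plain RAM and the host pointer is simply vaddr + te->addend; otherwise
   the low bits of the entry select the I/O, not-dirty or watchpoint slow
   path. */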
1676 /* add a new TLB entry. At most one entry for a given virtual address
1677 is permitted. Return 0 if OK or 2 if the page could not be mapped
1678 (can only happen in non SOFTMMU mode for I/O pages or pages
1679 conflicting with the host address space). */
1680 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1681 target_phys_addr_t paddr, int prot,
1682 int mmu_idx, int is_softmmu)
1684 PhysPageDesc *p;
1685 unsigned long pd;
1686 unsigned int index;
1687 target_ulong address;
1688 target_phys_addr_t addend;
1689 int ret;
1690 CPUTLBEntry *te;
1691 int i;
1693 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1694 if (!p) {
1695 pd = IO_MEM_UNASSIGNED;
1696 } else {
1697 pd = p->phys_offset;
1699 #if defined(DEBUG_TLB)
1700 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1701 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1702 #endif
1704 ret = 0;
1705 #if !defined(CONFIG_SOFTMMU)
1706 if (is_softmmu)
1707 #endif
1709 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1710 /* IO memory case */
1711 address = vaddr | pd;
1712 addend = paddr;
1713 } else {
1714 /* standard memory */
1715 address = vaddr;
1716 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1719 /* Make accesses to pages with watchpoints go via the
1720 watchpoint trap routines. */
1721 for (i = 0; i < env->nb_watchpoints; i++) {
1722 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1723 if (address & ~TARGET_PAGE_MASK) {
1724 env->watchpoint[i].addend = 0;
1725 address = vaddr | io_mem_watch;
1726 } else {
1727 env->watchpoint[i].addend = pd - paddr +
1728 (unsigned long) phys_ram_base;
1729 /* TODO: Figure out how to make read watchpoints coexist
1730 with code. */
1731 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1736 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1737 addend -= vaddr;
1738 te = &env->tlb_table[mmu_idx][index];
1739 te->addend = addend;
1740 if (prot & PAGE_READ) {
1741 te->addr_read = address;
1742 } else {
1743 te->addr_read = -1;
1745 if (prot & PAGE_EXEC) {
1746 te->addr_code = address;
1747 } else {
1748 te->addr_code = -1;
1750 if (prot & PAGE_WRITE) {
1751 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1752 (pd & IO_MEM_ROMD)) {
1753 /* write access calls the I/O callback */
1754 te->addr_write = vaddr |
1755 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1756 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1757 !cpu_physical_memory_is_dirty(pd)) {
1758 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1759 } else {
1760 te->addr_write = address;
1762 } else {
1763 te->addr_write = -1;
1766 #if !defined(CONFIG_SOFTMMU)
1767 else {
1768 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1769 /* IO access: no mapping is done as it will be handled by the
1770 soft MMU */
1771 if (!(env->hflags & HF_SOFTMMU_MASK))
1772 ret = 2;
1773 } else {
1774 void *map_addr;
1776 if (vaddr >= MMAP_AREA_END) {
1777 ret = 2;
1778 } else {
1779 if (prot & PROT_WRITE) {
1780 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1781 #if defined(TARGET_HAS_SMC) || 1
1782 first_tb ||
1783 #endif
1784 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1785 !cpu_physical_memory_is_dirty(pd))) {
1786 /* ROM: we do as if code was inside */
1787 /* if code is present, we only map as read only and save the
1788 original mapping */
1789 VirtPageDesc *vp;
1791 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1792 vp->phys_addr = pd;
1793 vp->prot = prot;
1794 vp->valid_tag = virt_valid_tag;
1795 prot &= ~PAGE_WRITE;
1798 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1799 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1800 if (map_addr == MAP_FAILED) {
1801 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1802 paddr, vaddr);
1807 #endif
1808 return ret;
1811 /* called from signal handler: invalidate the code and unprotect the
1812 page. Return TRUE if the fault was successfully handled. */
1813 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1815 #if !defined(CONFIG_SOFTMMU)
1816 VirtPageDesc *vp;
1818 #if defined(DEBUG_TLB)
1819 printf("page_unprotect: addr=0x%08x\n", addr);
1820 #endif
1821 addr &= TARGET_PAGE_MASK;
1823 /* if it is not mapped, no need to worry here */
1824 if (addr >= MMAP_AREA_END)
1825 return 0;
1826 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1827 if (!vp)
1828 return 0;
1829 /* NOTE: in this case, validate_tag is _not_ tested as it
1830 validates only the code TLB */
1831 if (vp->valid_tag != virt_valid_tag)
1832 return 0;
1833 if (!(vp->prot & PAGE_WRITE))
1834 return 0;
1835 #if defined(DEBUG_TLB)
1836 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1837 addr, vp->phys_addr, vp->prot);
1838 #endif
1839 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1840 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1841 (unsigned long)addr, vp->prot);
1842 /* set the dirty bit */
1843 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1844 /* flush the code inside */
1845 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1846 return 1;
1847 #else
1848 return 0;
1849 #endif
1852 #else
1854 void tlb_flush(CPUState *env, int flush_global)
1858 void tlb_flush_page(CPUState *env, target_ulong addr)
1862 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1863 target_phys_addr_t paddr, int prot,
1864 int mmu_idx, int is_softmmu)
1866 return 0;
1869 /* dump memory mappings */
1870 void page_dump(FILE *f)
1872 unsigned long start, end;
1873 int i, j, prot, prot1;
1874 PageDesc *p;
1876 fprintf(f, "%-8s %-8s %-8s %s\n",
1877 "start", "end", "size", "prot");
1878 start = -1;
1879 end = -1;
1880 prot = 0;
1881 for(i = 0; i <= L1_SIZE; i++) {
1882 if (i < L1_SIZE)
1883 p = l1_map[i];
1884 else
1885 p = NULL;
1886 for(j = 0;j < L2_SIZE; j++) {
1887 if (!p)
1888 prot1 = 0;
1889 else
1890 prot1 = p[j].flags;
1891 if (prot1 != prot) {
1892 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1893 if (start != -1) {
1894 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1895 start, end, end - start,
1896 prot & PAGE_READ ? 'r' : '-',
1897 prot & PAGE_WRITE ? 'w' : '-',
1898 prot & PAGE_EXEC ? 'x' : '-');
1900 if (prot1 != 0)
1901 start = end;
1902 else
1903 start = -1;
1904 prot = prot1;
1906 if (!p)
1907 break;
1912 int page_get_flags(target_ulong address)
1914 PageDesc *p;
1916 p = page_find(address >> TARGET_PAGE_BITS);
1917 if (!p)
1918 return 0;
1919 return p->flags;
1922 /* modify the flags of a page and invalidate the code if
1923 necessary. The flag PAGE_WRITE_ORG is set automatically
1924 depending on PAGE_WRITE */
1925 void page_set_flags(target_ulong start, target_ulong end, int flags)
1927 PageDesc *p;
1928 target_ulong addr;
1930 start = start & TARGET_PAGE_MASK;
1931 end = TARGET_PAGE_ALIGN(end);
1932 if (flags & PAGE_WRITE)
1933 flags |= PAGE_WRITE_ORG;
1934 spin_lock(&tb_lock);
1935 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1936 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1937 /* if the write protection is set, then we invalidate the code
1938 inside */
1939 if (!(p->flags & PAGE_WRITE) &&
1940 (flags & PAGE_WRITE) &&
1941 p->first_tb) {
1942 tb_invalidate_phys_page(addr, 0, NULL);
1944 p->flags = flags;
1946 spin_unlock(&tb_lock);
1949 int page_check_range(target_ulong start, target_ulong len, int flags)
1951 PageDesc *p;
1952 target_ulong end;
1953 target_ulong addr;
1955 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1956 start = start & TARGET_PAGE_MASK;
1958 if( end < start )
1959 /* we've wrapped around */
1960 return -1;
1961 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1962 p = page_find(addr >> TARGET_PAGE_BITS);
1963 if( !p )
1964 return -1;
1965 if( !(p->flags & PAGE_VALID) )
1966 return -1;
1968 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1969 return -1;
1970 if (flags & PAGE_WRITE) {
1971 if (!(p->flags & PAGE_WRITE_ORG))
1972 return -1;
1973 /* unprotect the page if it was put read-only because it
1974 contains translated code */
1975 if (!(p->flags & PAGE_WRITE)) {
1976 if (!page_unprotect(addr, 0, NULL))
1977 return -1;
1979 return 0;
1982 return 0;
1985 /* called from signal handler: invalidate the code and unprotect the
1986 page. Return TRUE if the fault was successfully handled. */
1987 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1989 unsigned int page_index, prot, pindex;
1990 PageDesc *p, *p1;
1991 target_ulong host_start, host_end, addr;
1993 host_start = address & qemu_host_page_mask;
1994 page_index = host_start >> TARGET_PAGE_BITS;
1995 p1 = page_find(page_index);
1996 if (!p1)
1997 return 0;
1998 host_end = host_start + qemu_host_page_size;
1999 p = p1;
2000 prot = 0;
2001 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2002 prot |= p->flags;
2003 p++;
2005 /* if the page was really writable, then we change its
2006 protection back to writable */
2007 if (prot & PAGE_WRITE_ORG) {
2008 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2009 if (!(p1[pindex].flags & PAGE_WRITE)) {
2010 mprotect((void *)g2h(host_start), qemu_host_page_size,
2011 (prot & PAGE_BITS) | PAGE_WRITE);
2012 p1[pindex].flags |= PAGE_WRITE;
2013 /* and since the content will be modified, we must invalidate
2014 the corresponding translated code. */
2015 tb_invalidate_phys_page(address, pc, puc);
2016 #ifdef DEBUG_TB_CHECK
2017 tb_invalidate_check(address);
2018 #endif
2019 return 1;
2022 return 0;
2025 static inline void tlb_set_dirty(CPUState *env,
2026 unsigned long addr, target_ulong vaddr)
2029 #endif /* defined(CONFIG_USER_ONLY) */
2031 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2032 int memory);
2033 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2034 int orig_memory);
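/* CHECK_SUBPAGE computes start_addr2/end_addr2, the offsets of the
   registered region inside the page at 'addr', and sets need_subpage when
   the region does not cover that whole page. */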
2035 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2036 need_subpage) \
2037 do { \
2038 if (addr > start_addr) \
2039 start_addr2 = 0; \
2040 else { \
2041 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2042 if (start_addr2 > 0) \
2043 need_subpage = 1; \
2046 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2047 end_addr2 = TARGET_PAGE_SIZE - 1; \
2048 else { \
2049 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2050 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2051 need_subpage = 1; \
2053 } while (0)
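/* Typical caller pattern (a hedged sketch, not code from this file; board
 * models are assumed to look roughly like this):
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
 *
 * MMIO regions instead pass an io index obtained from
 * cpu_register_io_memory() rather than a RAM offset. */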
2055 /* register physical memory. 'size' must be a multiple of the target
2056 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2057 io memory page */
2058 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2059 unsigned long size,
2060 unsigned long phys_offset)
2062 target_phys_addr_t addr, end_addr;
2063 PhysPageDesc *p;
2064 CPUState *env;
2065 unsigned long orig_size = size;
2066 void *subpage;
2068 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2069 end_addr = start_addr + (target_phys_addr_t)size;
2070 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2071 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2072 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2073 unsigned long orig_memory = p->phys_offset;
2074 target_phys_addr_t start_addr2, end_addr2;
2075 int need_subpage = 0;
2077 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2078 need_subpage);
2079 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2080 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2081 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2082 &p->phys_offset, orig_memory);
2083 } else {
2084 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2085 >> IO_MEM_SHIFT];
2087 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2088 } else {
2089 p->phys_offset = phys_offset;
2090 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2091 (phys_offset & IO_MEM_ROMD))
2092 phys_offset += TARGET_PAGE_SIZE;
2094 } else {
2095 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2096 p->phys_offset = phys_offset;
2097 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2098 (phys_offset & IO_MEM_ROMD))
2099 phys_offset += TARGET_PAGE_SIZE;
2100 else {
2101 target_phys_addr_t start_addr2, end_addr2;
2102 int need_subpage = 0;
2104 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2105 end_addr2, need_subpage);
2107 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2108 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2109 &p->phys_offset, IO_MEM_UNASSIGNED);
2110 subpage_register(subpage, start_addr2, end_addr2,
2111 phys_offset);
2117 /* since each CPU stores ram addresses in its TLB cache, we must
2118 reset the modified entries */
2119 /* XXX: slow ! */
2120 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2121 tlb_flush(env, 1);
2125 /* XXX: temporary until new memory mapping API */
2126 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2128 PhysPageDesc *p;
2130 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2131 if (!p)
2132 return IO_MEM_UNASSIGNED;
2133 return p->phys_offset;
2136 /* XXX: better than nothing */
2137 ram_addr_t qemu_ram_alloc(unsigned long size)
2139 ram_addr_t addr;
2140 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2141 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
2142 size, (unsigned long)phys_ram_size);
2143 abort();
2145 addr = phys_ram_alloc_offset;
2146 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2147 return addr;
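/* Illustrative sketch (not part of the original file): a board or device
   model typically pairs qemu_ram_alloc() with
   cpu_register_physical_memory().  The returned value is an offset into
   phys_ram_base; registering it (IO_MEM_RAM is the plain-RAM encoding)
   makes the range directly backed by host memory.  The 0x10000000 base
   address and the 1 MB size below are invented example values. */
#if 0
static void example_map_main_ram(void)
{
    ram_addr_t ram_off;

    ram_off = qemu_ram_alloc(0x00100000);               /* 1 MB backing RAM */
    cpu_register_physical_memory(0x10000000, 0x00100000,
                                 ram_off | IO_MEM_RAM); /* plain RAM mapping */
}
#endif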
2150 void qemu_ram_free(ram_addr_t addr)
2154 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2156 #ifdef DEBUG_UNASSIGNED
2157 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2158 #endif
2159 #ifdef TARGET_SPARC
2160 do_unassigned_access(addr, 0, 0, 0);
2161 #elif TARGET_CRIS
2162 do_unassigned_access(addr, 0, 0, 0);
2163 #endif
2164 return 0;
2167 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2169 #ifdef DEBUG_UNASSIGNED
2170 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2171 #endif
2172 #ifdef TARGET_SPARC
2173 do_unassigned_access(addr, 1, 0, 0);
2174 #elif TARGET_CRIS
2175 do_unassigned_access(addr, 1, 0, 0);
2176 #endif
2179 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2180 unassigned_mem_readb,
2181 unassigned_mem_readb,
2182 unassigned_mem_readb,
2185 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2186 unassigned_mem_writeb,
2187 unassigned_mem_writeb,
2188 unassigned_mem_writeb,
2191 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2193 unsigned long ram_addr;
2194 int dirty_flags;
2195 ram_addr = addr - (unsigned long)phys_ram_base;
2196 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2197 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2198 #if !defined(CONFIG_USER_ONLY)
2199 tb_invalidate_phys_page_fast(ram_addr, 1);
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 #endif
2203 stb_p((uint8_t *)(long)addr, val);
2204 #ifdef USE_KQEMU
2205 if (cpu_single_env->kqemu_enabled &&
2206 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2207 kqemu_modify_page(cpu_single_env, ram_addr);
2208 #endif
2209 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2210 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2211 /* we remove the notdirty callback only if the code has been
2212 flushed */
2213 if (dirty_flags == 0xff)
2214 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2217 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2219 unsigned long ram_addr;
2220 int dirty_flags;
2221 ram_addr = addr - (unsigned long)phys_ram_base;
2222 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2223 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2224 #if !defined(CONFIG_USER_ONLY)
2225 tb_invalidate_phys_page_fast(ram_addr, 2);
2226 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2227 #endif
2229 stw_p((uint8_t *)(long)addr, val);
2230 #ifdef USE_KQEMU
2231 if (cpu_single_env->kqemu_enabled &&
2232 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2233 kqemu_modify_page(cpu_single_env, ram_addr);
2234 #endif
2235 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2236 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2237 /* we remove the notdirty callback only if the code has been
2238 flushed */
2239 if (dirty_flags == 0xff)
2240 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2243 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2245 unsigned long ram_addr;
2246 int dirty_flags;
2247 ram_addr = addr - (unsigned long)phys_ram_base;
2248 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2249 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2250 #if !defined(CONFIG_USER_ONLY)
2251 tb_invalidate_phys_page_fast(ram_addr, 4);
2252 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2253 #endif
2255 stl_p((uint8_t *)(long)addr, val);
2256 #ifdef USE_KQEMU
2257 if (cpu_single_env->kqemu_enabled &&
2258 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2259 kqemu_modify_page(cpu_single_env, ram_addr);
2260 #endif
2261 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2262 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2263 /* we remove the notdirty callback only if the code has been
2264 flushed */
2265 if (dirty_flags == 0xff)
2266 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2269 static CPUReadMemoryFunc *error_mem_read[3] = {
2270 NULL, /* never used */
2271 NULL, /* never used */
2272 NULL, /* never used */
2275 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2276 notdirty_mem_writeb,
2277 notdirty_mem_writew,
2278 notdirty_mem_writel,
2281 #if defined(CONFIG_SOFTMMU)
2282 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2283 so these check for a hit then pass through to the normal out-of-line
2284 phys routines. */
2285 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2287 return ldub_phys(addr);
2290 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2292 return lduw_phys(addr);
2295 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2297 return ldl_phys(addr);
2300 /* Generate a debug exception if a watchpoint has been hit.
2301 Returns the real physical address of the access. addr will be a host
2302 address in case of a RAM location. */
2303 static target_ulong check_watchpoint(target_phys_addr_t addr)
2305 CPUState *env = cpu_single_env;
2306 target_ulong watch;
2307 target_ulong retaddr;
2308 int i;
2310 retaddr = addr;
2311 for (i = 0; i < env->nb_watchpoints; i++) {
2312 watch = env->watchpoint[i].vaddr;
2313 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2314 retaddr = addr - env->watchpoint[i].addend;
2315 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2316 cpu_single_env->watchpoint_hit = i + 1;
2317 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2318 break;
2322 return retaddr;
2325 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2326 uint32_t val)
2328 addr = check_watchpoint(addr);
2329 stb_phys(addr, val);
2332 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2333 uint32_t val)
2335 addr = check_watchpoint(addr);
2336 stw_phys(addr, val);
2339 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2340 uint32_t val)
2342 addr = check_watchpoint(addr);
2343 stl_phys(addr, val);
2346 static CPUReadMemoryFunc *watch_mem_read[3] = {
2347 watch_mem_readb,
2348 watch_mem_readw,
2349 watch_mem_readl,
2352 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2353 watch_mem_writeb,
2354 watch_mem_writew,
2355 watch_mem_writel,
2357 #endif
2359 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2360 unsigned int len)
2362 uint32_t ret;
2363 unsigned int idx;
2365 idx = SUBPAGE_IDX(addr - mmio->base);
2366 #if defined(DEBUG_SUBPAGE)
2367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2368 mmio, len, addr, idx);
2369 #endif
2370 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2372 return ret;
2375 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2376 uint32_t value, unsigned int len)
2378 unsigned int idx;
2380 idx = SUBPAGE_IDX(addr - mmio->base);
2381 #if defined(DEBUG_SUBPAGE)
2382 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2383 mmio, len, addr, idx, value);
2384 #endif
2385 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2388 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2390 #if defined(DEBUG_SUBPAGE)
2391 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2392 #endif
2394 return subpage_readlen(opaque, addr, 0);
2397 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2398 uint32_t value)
2400 #if defined(DEBUG_SUBPAGE)
2401 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2402 #endif
2403 subpage_writelen(opaque, addr, value, 0);
2406 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2408 #if defined(DEBUG_SUBPAGE)
2409 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2410 #endif
2412 return subpage_readlen(opaque, addr, 1);
2415 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2416 uint32_t value)
2418 #if defined(DEBUG_SUBPAGE)
2419 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2420 #endif
2421 subpage_writelen(opaque, addr, value, 1);
2424 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2426 #if defined(DEBUG_SUBPAGE)
2427 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2428 #endif
2430 return subpage_readlen(opaque, addr, 2);
2433 static void subpage_writel (void *opaque,
2434 target_phys_addr_t addr, uint32_t value)
2436 #if defined(DEBUG_SUBPAGE)
2437 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2438 #endif
2439 subpage_writelen(opaque, addr, value, 2);
2442 static CPUReadMemoryFunc *subpage_read[] = {
2443 &subpage_readb,
2444 &subpage_readw,
2445 &subpage_readl,
2448 static CPUWriteMemoryFunc *subpage_write[] = {
2449 &subpage_writeb,
2450 &subpage_writew,
2451 &subpage_writel,
2454 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2455 int memory)
2457 int idx, eidx;
2458 unsigned int i;
2460 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2461 return -1;
2462 idx = SUBPAGE_IDX(start);
2463 eidx = SUBPAGE_IDX(end);
2464 #if defined(DEBUG_SUBPAGE)
2465 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2466 mmio, start, end, idx, eidx, memory);
2467 #endif
2468 memory >>= IO_MEM_SHIFT;
2469 for (; idx <= eidx; idx++) {
2470 for (i = 0; i < 4; i++) {
2471 if (io_mem_read[memory][i]) {
2472 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2473 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2475 if (io_mem_write[memory][i]) {
2476 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2477 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2482 return 0;
2485 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2486 int orig_memory)
2488 subpage_t *mmio;
2489 int subpage_memory;
2491 mmio = qemu_mallocz(sizeof(subpage_t));
2492 if (mmio != NULL) {
2493 mmio->base = base;
2494 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2495 #if defined(DEBUG_SUBPAGE)
2496 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2497 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2498 #endif
2499 *phys = subpage_memory | IO_MEM_SUBPAGE;
2500 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2503 return mmio;
2506 static void io_mem_init(void)
2508 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2509 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2510 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2511 io_mem_nb = 5;
2513 #if defined(CONFIG_SOFTMMU)
2514 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2515 watch_mem_write, NULL);
2516 #endif
2517 /* alloc dirty bits array */
2518 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2519 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2522 /* mem_read and mem_write are arrays of functions containing the
2523 function to access byte (index 0), word (index 1) and dword (index
2524 2). Functions can be omitted with a NULL function pointer. The
2525 registered functions may be modified dynamically later.
2526 If io_index is positive, the corresponding io zone is
2527 modified. If it is zero or negative, a new io zone is allocated. The
2528 return value can be used with cpu_register_physical_memory(). (-1) is
2529 returned on error. */
2530 int cpu_register_io_memory(int io_index,
2531 CPUReadMemoryFunc **mem_read,
2532 CPUWriteMemoryFunc **mem_write,
2533 void *opaque)
2535 int i, subwidth = 0;
2537 if (io_index <= 0) {
2538 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2539 return -1;
2540 io_index = io_mem_nb++;
2541 } else {
2542 if (io_index >= IO_MEM_NB_ENTRIES)
2543 return -1;
2546 for(i = 0;i < 3; i++) {
2547 if (!mem_read[i] || !mem_write[i])
2548 subwidth = IO_MEM_SUBWIDTH;
2549 io_mem_read[io_index][i] = mem_read[i];
2550 io_mem_write[io_index][i] = mem_write[i];
2552 io_mem_opaque[io_index] = opaque;
2553 return (io_index << IO_MEM_SHIFT) | subwidth;
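/* Illustrative sketch (not part of the original file): registering a toy
   MMIO region.  The three-entry arrays give the byte/word/dword callbacks
   described in the comment above; the callbacks, the opaque pointer and
   the 0x20000000 base address are invented for the example. */
#if 0
static uint32_t example_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;                                   /* device-specific read */
}

static void example_mmio_writeb(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* device-specific write */
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_readb, example_mmio_readb, example_mmio_readb,
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_writeb, example_mmio_writeb, example_mmio_writeb,
};

static void example_map_mmio(void)
{
    int io;

    io = cpu_register_io_memory(0, example_mmio_read, example_mmio_write, NULL);
    if (io >= 0)
        cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif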
2556 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2558 return io_mem_write[io_index >> IO_MEM_SHIFT];
2561 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2563 return io_mem_read[io_index >> IO_MEM_SHIFT];
2566 /* physical memory access (slow version, mainly for debug) */
2567 #if defined(CONFIG_USER_ONLY)
2568 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2569 int len, int is_write)
2571 int l, flags;
2572 target_ulong page;
2573 void * p;
2575 while (len > 0) {
2576 page = addr & TARGET_PAGE_MASK;
2577 l = (page + TARGET_PAGE_SIZE) - addr;
2578 if (l > len)
2579 l = len;
2580 flags = page_get_flags(page);
2581 if (!(flags & PAGE_VALID))
2582 return;
2583 if (is_write) {
2584 if (!(flags & PAGE_WRITE))
2585 return;
2586 /* XXX: this code should not depend on lock_user */
2587 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2588 /* FIXME - should this return an error rather than just fail? */
2589 return;
2590 memcpy(p, buf, len);
2591 unlock_user(p, addr, len);
2592 } else {
2593 if (!(flags & PAGE_READ))
2594 return;
2595 /* XXX: this code should not depend on lock_user */
2596 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2597 /* FIXME - should this return an error rather than just fail? */
2598 return;
2599 memcpy(buf, p, len);
2600 unlock_user(p, addr, 0);
2602 len -= l;
2603 buf += l;
2604 addr += l;
2608 #else
2609 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2610 int len, int is_write)
2612 int l, io_index;
2613 uint8_t *ptr;
2614 uint32_t val;
2615 target_phys_addr_t page;
2616 unsigned long pd;
2617 PhysPageDesc *p;
2619 while (len > 0) {
2620 page = addr & TARGET_PAGE_MASK;
2621 l = (page + TARGET_PAGE_SIZE) - addr;
2622 if (l > len)
2623 l = len;
2624 p = phys_page_find(page >> TARGET_PAGE_BITS);
2625 if (!p) {
2626 pd = IO_MEM_UNASSIGNED;
2627 } else {
2628 pd = p->phys_offset;
2631 if (is_write) {
2632 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2633 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2634 /* XXX: could force cpu_single_env to NULL to avoid
2635 potential bugs */
2636 if (l >= 4 && ((addr & 3) == 0)) {
2637 /* 32 bit write access */
2638 val = ldl_p(buf);
2639 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2640 l = 4;
2641 } else if (l >= 2 && ((addr & 1) == 0)) {
2642 /* 16 bit write access */
2643 val = lduw_p(buf);
2644 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2645 l = 2;
2646 } else {
2647 /* 8 bit write access */
2648 val = ldub_p(buf);
2649 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2650 l = 1;
2652 } else {
2653 unsigned long addr1;
2654 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2655 /* RAM case */
2656 ptr = phys_ram_base + addr1;
2657 memcpy(ptr, buf, l);
2658 if (!cpu_physical_memory_is_dirty(addr1)) {
2659 /* invalidate code */
2660 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2661 /* set dirty bit */
2662 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2663 (0xff & ~CODE_DIRTY_FLAG);
2665 #ifdef USE_KVM
2666 /* qemu doesn't execute guest code directly, but kvm does,
2667 so flush the instruction caches */
2668 flush_icache_range((unsigned long)ptr, ((unsigned long)ptr)+l);
2669 #endif
2671 } else {
2672 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2673 !(pd & IO_MEM_ROMD)) {
2674 /* I/O case */
2675 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2676 if (l >= 4 && ((addr & 3) == 0)) {
2677 /* 32 bit read access */
2678 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2679 stl_p(buf, val);
2680 l = 4;
2681 } else if (l >= 2 && ((addr & 1) == 0)) {
2682 /* 16 bit read access */
2683 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2684 stw_p(buf, val);
2685 l = 2;
2686 } else {
2687 /* 8 bit read access */
2688 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2689 stb_p(buf, val);
2690 l = 1;
2692 } else {
2693 /* RAM case */
2694 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2695 (addr & ~TARGET_PAGE_MASK);
2696 memcpy(buf, ptr, l);
2699 len -= l;
2700 buf += l;
2701 addr += l;
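/* Illustrative sketch (not part of the original file): device models use
   cpu_physical_memory_rw() (or the cpu_physical_memory_read/write wrappers
   seen later in this file) for DMA-style access to guest memory.
   'dma_addr', 'buf' and 'len' are assumed to come from the device state. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *buf, int len)
{
    /* is_write = 1: copy from the host buffer into guest physical memory */
    cpu_physical_memory_rw(dma_addr, (uint8_t *)buf, len, 1);
}
#endif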
2705 /* used for ROM loading: can write in RAM and ROM */
2706 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2707 const uint8_t *buf, int len)
2709 int l;
2710 uint8_t *ptr;
2711 target_phys_addr_t page;
2712 unsigned long pd;
2713 PhysPageDesc *p;
2715 while (len > 0) {
2716 page = addr & TARGET_PAGE_MASK;
2717 l = (page + TARGET_PAGE_SIZE) - addr;
2718 if (l > len)
2719 l = len;
2720 p = phys_page_find(page >> TARGET_PAGE_BITS);
2721 if (!p) {
2722 pd = IO_MEM_UNASSIGNED;
2723 } else {
2724 pd = p->phys_offset;
2727 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2728 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2729 !(pd & IO_MEM_ROMD)) {
2730 /* do nothing */
2731 } else {
2732 unsigned long addr1;
2733 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2734 /* ROM/RAM case */
2735 ptr = phys_ram_base + addr1;
2736 memcpy(ptr, buf, l);
2738 len -= l;
2739 buf += l;
2740 addr += l;
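/* Illustrative sketch (not part of the original file): firmware loaders
   use cpu_physical_memory_write_rom() because, unlike
   cpu_physical_memory_rw(), it also writes into regions registered as ROM.
   The 0xfffe0000 base address is an invented example value. */
#if 0
static void example_load_firmware(const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(0xfffe0000, image, size);
}
#endif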
2745 /* warning: addr must be aligned */
2746 uint32_t ldl_phys(target_phys_addr_t addr)
2748 int io_index;
2749 uint8_t *ptr;
2750 uint32_t val;
2751 unsigned long pd;
2752 PhysPageDesc *p;
2754 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2755 if (!p) {
2756 pd = IO_MEM_UNASSIGNED;
2757 } else {
2758 pd = p->phys_offset;
2761 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2762 !(pd & IO_MEM_ROMD)) {
2763 /* I/O case */
2764 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2765 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2766 } else {
2767 /* RAM case */
2768 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2769 (addr & ~TARGET_PAGE_MASK);
2770 val = ldl_p(ptr);
2772 return val;
2775 /* warning: addr must be aligned */
2776 uint64_t ldq_phys(target_phys_addr_t addr)
2778 int io_index;
2779 uint8_t *ptr;
2780 uint64_t val;
2781 unsigned long pd;
2782 PhysPageDesc *p;
2784 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2785 if (!p) {
2786 pd = IO_MEM_UNASSIGNED;
2787 } else {
2788 pd = p->phys_offset;
2791 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2792 !(pd & IO_MEM_ROMD)) {
2793 /* I/O case */
2794 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2795 #ifdef TARGET_WORDS_BIGENDIAN
2796 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2797 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2798 #else
2799 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2800 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2801 #endif
2802 } else {
2803 /* RAM case */
2804 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2805 (addr & ~TARGET_PAGE_MASK);
2806 val = ldq_p(ptr);
2808 return val;
2811 /* XXX: optimize */
2812 uint32_t ldub_phys(target_phys_addr_t addr)
2814 uint8_t val;
2815 cpu_physical_memory_read(addr, &val, 1);
2816 return val;
2819 /* XXX: optimize */
2820 uint32_t lduw_phys(target_phys_addr_t addr)
2822 uint16_t val;
2823 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2824 return tswap16(val);
2827 #ifdef __GNUC__
2828 #define likely(x) __builtin_expect(!!(x), 1)
2829 #define unlikely(x) __builtin_expect(!!(x), 0)
2830 #else
2831 #define likely(x) x
2832 #define unlikely(x) x
2833 #endif
2835 /* warning: addr must be aligned. The ram page is not masked as dirty
2836 and the code inside is not invalidated. It is useful if the dirty
2837 bits are used to track modified PTEs */
2838 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2840 int io_index;
2841 uint8_t *ptr;
2842 unsigned long pd;
2843 PhysPageDesc *p;
2845 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2846 if (!p) {
2847 pd = IO_MEM_UNASSIGNED;
2848 } else {
2849 pd = p->phys_offset;
2852 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2853 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2854 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2855 } else {
2856 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2857 ptr = phys_ram_base + addr1;
2858 stl_p(ptr, val);
2860 if (unlikely(in_migration)) {
2861 if (!cpu_physical_memory_is_dirty(addr1)) {
2862 /* invalidate code */
2863 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2864 /* set dirty bit */
2865 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2866 (0xff & ~CODE_DIRTY_FLAG);
2872 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2874 int io_index;
2875 uint8_t *ptr;
2876 unsigned long pd;
2877 PhysPageDesc *p;
2879 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2880 if (!p) {
2881 pd = IO_MEM_UNASSIGNED;
2882 } else {
2883 pd = p->phys_offset;
2886 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2887 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2888 #ifdef TARGET_WORDS_BIGENDIAN
2889 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2890 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2891 #else
2892 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2893 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2894 #endif
2895 } else {
2896 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2897 (addr & ~TARGET_PAGE_MASK);
2898 stq_p(ptr, val);
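/* Illustrative sketch (not part of the original file): as the comment
   above explains, the *_notdirty variants let an MMU emulation update
   accessed/dirty bits in a guest PTE without marking the RAM page dirty,
   so the dirty bitmap keeps tracking only real guest writes.  The PTE
   address and the 0x20 bit value are invented for the example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);           /* read the guest PTE */
    pte |= 0x20;                        /* example "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);   /* write back, dirty bits untouched */
}
#endif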
2902 /* warning: addr must be aligned */
2903 void stl_phys(target_phys_addr_t addr, uint32_t val)
2905 int io_index;
2906 uint8_t *ptr;
2907 unsigned long pd;
2908 PhysPageDesc *p;
2910 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2911 if (!p) {
2912 pd = IO_MEM_UNASSIGNED;
2913 } else {
2914 pd = p->phys_offset;
2917 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2918 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2919 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2920 } else {
2921 unsigned long addr1;
2922 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2923 /* RAM case */
2924 ptr = phys_ram_base + addr1;
2925 stl_p(ptr, val);
2926 if (!cpu_physical_memory_is_dirty(addr1)) {
2927 /* invalidate code */
2928 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2929 /* set dirty bit */
2930 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2931 (0xff & ~CODE_DIRTY_FLAG);
2936 /* XXX: optimize */
2937 void stb_phys(target_phys_addr_t addr, uint32_t val)
2939 uint8_t v = val;
2940 cpu_physical_memory_write(addr, &v, 1);
2943 /* XXX: optimize */
2944 void stw_phys(target_phys_addr_t addr, uint32_t val)
2946 uint16_t v = tswap16(val);
2947 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2950 /* XXX: optimize */
2951 void stq_phys(target_phys_addr_t addr, uint64_t val)
2953 val = tswap64(val);
2954 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2957 #endif
2959 /* virtual memory access for debug */
2960 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2961 uint8_t *buf, int len, int is_write)
2963 int l;
2964 target_phys_addr_t phys_addr;
2965 target_ulong page;
2967 while (len > 0) {
2968 page = addr & TARGET_PAGE_MASK;
2969 phys_addr = cpu_get_phys_page_debug(env, page);
2970 /* if no physical page mapped, return an error */
2971 if (phys_addr == -1)
2972 return -1;
2973 l = (page + TARGET_PAGE_SIZE) - addr;
2974 if (l > len)
2975 l = len;
2976 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2977 buf, l, is_write);
2978 len -= l;
2979 buf += l;
2980 addr += l;
2982 return 0;
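/* Illustrative sketch (not part of the original file): a debugger stub can
   read a 32-bit word at a guest virtual address with cpu_memory_rw_debug();
   the buffer is then decoded with the ldl_p() helper in target byte order.
   'env' and 'vaddr' are assumed to be supplied by the caller. */
#if 0
static int example_read_guest_word(CPUState *env, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;                      /* no physical page mapped */
    *out = ldl_p(buf);                  /* target byte order */
    return 0;
}
#endif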
2985 void dump_exec_info(FILE *f,
2986 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2988 int i, target_code_size, max_target_code_size;
2989 int direct_jmp_count, direct_jmp2_count, cross_page;
2990 TranslationBlock *tb;
2992 target_code_size = 0;
2993 max_target_code_size = 0;
2994 cross_page = 0;
2995 direct_jmp_count = 0;
2996 direct_jmp2_count = 0;
2997 for(i = 0; i < nb_tbs; i++) {
2998 tb = &tbs[i];
2999 target_code_size += tb->size;
3000 if (tb->size > max_target_code_size)
3001 max_target_code_size = tb->size;
3002 if (tb->page_addr[1] != -1)
3003 cross_page++;
3004 if (tb->tb_next_offset[0] != 0xffff) {
3005 direct_jmp_count++;
3006 if (tb->tb_next_offset[1] != 0xffff) {
3007 direct_jmp2_count++;
3011 /* XXX: avoid using doubles ? */
3012 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3013 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3014 nb_tbs ? target_code_size / nb_tbs : 0,
3015 max_target_code_size);
3016 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3017 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3018 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3019 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3020 cross_page,
3021 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3022 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3023 direct_jmp_count,
3024 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3025 direct_jmp2_count,
3026 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3027 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3028 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3029 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3032 #if !defined(CONFIG_USER_ONLY)
3034 #define MMUSUFFIX _cmmu
3035 #define GETPC() NULL
3036 #define env cpu_single_env
3037 #define SOFTMMU_CODE_ACCESS
3039 #define SHIFT 0
3040 #include "softmmu_template.h"
3042 #define SHIFT 1
3043 #include "softmmu_template.h"
3045 #define SHIFT 2
3046 #include "softmmu_template.h"
3048 #define SHIFT 3
3049 #include "softmmu_template.h"
3051 #undef env
3053 #endif