Align gdbstub with qemu-cvs
[qemu-kvm/fedora.git] / exec.c
blob 69adc5e7f8dce0dc0b8ce7f8b9f3cbd723588d28
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #ifdef USE_KVM
38 #include "dyngen.h"
39 #include "qemu-kvm.h"
40 #endif
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #endif
45 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_FLUSH
47 //#define DEBUG_TLB
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
59 #undef DEBUG_TB_CHECK
60 #endif
62 /* threshold to flush the translated code buffer */
63 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
65 #define SMC_BITMAP_USE_THRESHOLD 10
67 #define MMAP_AREA_START 0x00000000
68 #define MMAP_AREA_END 0xa8000000
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif USE_KQEMU
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #elif TARGET_X86_64
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #else
85 #define TARGET_PHYS_ADDR_SPACE_BITS 32
86 #endif
88 #ifdef USE_KVM
89 extern int kvm_allowed;
90 extern kvm_context_t kvm_context;
91 #endif
93 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
94 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
95 int nb_tbs;
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
99 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
100 uint8_t *code_gen_ptr;
102 ram_addr_t phys_ram_size;
103 int phys_ram_fd;
104 uint8_t *phys_ram_base;
105 uint8_t *phys_ram_dirty;
106 uint8_t *bios_mem;
107 static int in_migration;
108 static ram_addr_t phys_ram_alloc_offset = 0;
110 CPUState *first_cpu;
111 /* current CPU in the current thread. It is only valid inside
112 cpu_exec() */
113 CPUState *cpu_single_env;
115 typedef struct PageDesc {
116 /* list of TBs intersecting this ram page */
117 TranslationBlock *first_tb;
118 /* in order to optimize self modifying code, we count the number
119 of write accesses to a given page; past a threshold a bitmap is used */
120 unsigned int code_write_count;
121 uint8_t *code_bitmap;
122 #if defined(CONFIG_USER_ONLY)
123 unsigned long flags;
124 #endif
125 } PageDesc;
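/* Rough sketch of how the two fields above are used for self-modifying
   code detection, as far as this file shows: writes into a page that
   still holds translated code go through the slow invalidation path and
   bump code_write_count; once it reaches SMC_BITMAP_USE_THRESHOLD,
   build_page_bitmap() allocates code_bitmap (one bit per byte of the
   target page) and marks the ranges covered by each TB, letting
   tb_invalidate_phys_page_fast() skip writes that do not hit any
   translated code. */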
127 typedef struct PhysPageDesc {
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 ram_addr_t phys_offset;
130 } PhysPageDesc;
132 #define L2_BITS 10
133 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
134 /* XXX: this is a temporary hack for alpha target.
135 * In the future, this is to be replaced by a multi-level table
136 * to actually be able to handle the complete 64-bit address space.
138 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
139 #else
140 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
141 #endif
143 #define L1_SIZE (1 << L1_BITS)
144 #define L2_SIZE (1 << L2_BITS)
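/* Illustrative split performed by page_find()/page_find_alloc() below,
   assuming a 4 KB target page (TARGET_PAGE_BITS == 12), which gives
   L1_BITS == 10 and therefore 1024 entries at each level:

       PageDesc *chunk = l1_map[index >> L2_BITS];        // L1 lookup
       PageDesc *p     = chunk + (index & (L2_SIZE - 1)); // L2 lookup
*/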
146 static void io_mem_init(void);
148 unsigned long qemu_real_host_page_size;
149 unsigned long qemu_host_page_bits;
150 unsigned long qemu_host_page_size;
151 unsigned long qemu_host_page_mask;
153 /* XXX: for system emulation, it could just be an array */
154 static PageDesc *l1_map[L1_SIZE];
155 PhysPageDesc **l1_phys_map;
157 /* io memory support */
158 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
159 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
160 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
161 static int io_mem_nb;
162 #if defined(CONFIG_SOFTMMU)
163 static int io_mem_watch;
164 #endif
166 /* log support */
167 char *logfilename = "/tmp/qemu.log";
168 FILE *logfile;
169 int loglevel;
170 static int log_append = 0;
172 /* statistics */
173 static int tlb_flush_count;
174 static int tb_flush_count;
175 static int tb_phys_invalidate_count;
177 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
178 typedef struct subpage_t {
179 target_phys_addr_t base;
180 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
181 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
182 void *opaque[TARGET_PAGE_SIZE];
183 } subpage_t;
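/* Sketch of the subpage mechanism (subpage_register()/subpage_init()
   are declared further down): when a registered memory region does not
   start or end on a TARGET_PAGE_SIZE boundary, the affected page gets a
   subpage_t, and each access is dispatched through the per-byte-offset
   tables above using SUBPAGE_IDX(addr), so different parts of one page
   can have different handlers and opaque pointers. */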
185 static void page_init(void)
187 /* NOTE: we can always suppose that qemu_host_page_size >=
188 TARGET_PAGE_SIZE */
189 #ifdef _WIN32
191 SYSTEM_INFO system_info;
192 DWORD old_protect;
194 GetSystemInfo(&system_info);
195 qemu_real_host_page_size = system_info.dwPageSize;
197 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
198 PAGE_EXECUTE_READWRITE, &old_protect);
200 #else
201 qemu_real_host_page_size = getpagesize();
203 unsigned long start, end;
205 start = (unsigned long)code_gen_buffer;
206 start &= ~(qemu_real_host_page_size - 1);
208 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
209 end += qemu_real_host_page_size - 1;
210 end &= ~(qemu_real_host_page_size - 1);
212 mprotect((void *)start, end - start,
213 PROT_READ | PROT_WRITE | PROT_EXEC);
215 #endif
217 if (qemu_host_page_size == 0)
218 qemu_host_page_size = qemu_real_host_page_size;
219 if (qemu_host_page_size < TARGET_PAGE_SIZE)
220 qemu_host_page_size = TARGET_PAGE_SIZE;
221 qemu_host_page_bits = 0;
222 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
223 qemu_host_page_bits++;
224 qemu_host_page_mask = ~(qemu_host_page_size - 1);
225 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
226 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
228 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
230 long long startaddr, endaddr;
231 FILE *f;
232 int n;
234 f = fopen("/proc/self/maps", "r");
235 if (f) {
236 do {
237 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
238 if (n == 2) {
239 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
240 TARGET_PAGE_ALIGN(endaddr),
241 PAGE_RESERVED);
243 } while (!feof(f));
244 fclose(f);
247 #endif
250 static inline PageDesc *page_find_alloc(unsigned int index)
252 PageDesc **lp, *p;
254 lp = &l1_map[index >> L2_BITS];
255 p = *lp;
256 if (!p) {
257 /* allocate if not found */
258 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
259 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
260 *lp = p;
262 return p + (index & (L2_SIZE - 1));
265 static inline PageDesc *page_find(unsigned int index)
267 PageDesc *p;
269 p = l1_map[index >> L2_BITS];
270 if (!p)
271 return 0;
272 return p + (index & (L2_SIZE - 1));
275 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
277 void **lp, **p;
278 PhysPageDesc *pd;
280 p = (void **)l1_phys_map;
281 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
283 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
284 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
285 #endif
286 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
287 p = *lp;
288 if (!p) {
289 /* allocate if not found */
290 if (!alloc)
291 return NULL;
292 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
293 memset(p, 0, sizeof(void *) * L1_SIZE);
294 *lp = p;
296 #endif
297 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
298 pd = *lp;
299 if (!pd) {
300 int i;
301 /* allocate if not found */
302 if (!alloc)
303 return NULL;
304 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
305 *lp = pd;
306 for (i = 0; i < L2_SIZE; i++)
307 pd[i].phys_offset = IO_MEM_UNASSIGNED;
309 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
312 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
314 return phys_page_find_alloc(index, 0);
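/* Layout note for the physical page table used above: when
   TARGET_PHYS_ADDR_SPACE_BITS <= 32 it is a single level (l1_phys_map
   slots point directly at L2_SIZE arrays of PhysPageDesc); for wider
   physical address spaces phys_page_find_alloc() inserts one extra
   indirection, which is what the
   "#if TARGET_PHYS_ADDR_SPACE_BITS > 32" block handles. */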
317 #if !defined(CONFIG_USER_ONLY)
318 static void tlb_protect_code(ram_addr_t ram_addr);
319 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
320 target_ulong vaddr);
321 #endif
323 void cpu_exec_init(CPUState *env)
325 CPUState **penv;
326 int cpu_index;
328 if (!code_gen_ptr) {
329 code_gen_ptr = code_gen_buffer;
330 page_init();
331 io_mem_init();
333 env->next_cpu = NULL;
334 penv = &first_cpu;
335 cpu_index = 0;
336 while (*penv != NULL) {
337 penv = (CPUState **)&(*penv)->next_cpu;
338 cpu_index++;
340 env->cpu_index = cpu_index;
341 env->nb_watchpoints = 0;
342 *penv = env;
345 static inline void invalidate_page_bitmap(PageDesc *p)
347 if (p->code_bitmap) {
348 qemu_free(p->code_bitmap);
349 p->code_bitmap = NULL;
351 p->code_write_count = 0;
354 /* set to NULL all the 'first_tb' fields in all PageDescs */
355 static void page_flush_tb(void)
357 int i, j;
358 PageDesc *p;
360 for(i = 0; i < L1_SIZE; i++) {
361 p = l1_map[i];
362 if (p) {
363 for(j = 0; j < L2_SIZE; j++) {
364 p->first_tb = NULL;
365 invalidate_page_bitmap(p);
366 p++;
372 /* flush all the translation blocks */
373 /* XXX: tb_flush is currently not thread safe */
374 void tb_flush(CPUState *env1)
376 CPUState *env;
377 #if defined(DEBUG_FLUSH)
378 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
379 (unsigned long)(code_gen_ptr - code_gen_buffer),
380 nb_tbs, nb_tbs > 0 ?
381 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
382 #endif
383 nb_tbs = 0;
385 for(env = first_cpu; env != NULL; env = env->next_cpu) {
386 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
389 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
390 page_flush_tb();
392 code_gen_ptr = code_gen_buffer;
393 /* XXX: flush processor icache at this point if cache flush is
394 expensive */
395 tb_flush_count++;
398 #ifdef DEBUG_TB_CHECK
400 static void tb_invalidate_check(target_ulong address)
402 TranslationBlock *tb;
403 int i;
404 address &= TARGET_PAGE_MASK;
405 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
406 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
407 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
408 address >= tb->pc + tb->size)) {
409 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
410 address, (long)tb->pc, tb->size);
416 /* verify that all the pages have correct rights for code */
417 static void tb_page_check(void)
419 TranslationBlock *tb;
420 int i, flags1, flags2;
422 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
423 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
424 flags1 = page_get_flags(tb->pc);
425 flags2 = page_get_flags(tb->pc + tb->size - 1);
426 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
427 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
428 (long)tb->pc, tb->size, flags1, flags2);
434 void tb_jmp_check(TranslationBlock *tb)
436 TranslationBlock *tb1;
437 unsigned int n1;
439 /* suppress any remaining jumps to this TB */
440 tb1 = tb->jmp_first;
441 for(;;) {
442 n1 = (long)tb1 & 3;
443 tb1 = (TranslationBlock *)((long)tb1 & ~3);
444 if (n1 == 2)
445 break;
446 tb1 = tb1->jmp_next[n1];
448 /* check end of list */
449 if (tb1 != tb) {
450 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
454 #endif
456 /* invalidate one TB */
457 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
458 int next_offset)
460 TranslationBlock *tb1;
461 for(;;) {
462 tb1 = *ptb;
463 if (tb1 == tb) {
464 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
465 break;
467 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
471 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
473 TranslationBlock *tb1;
474 unsigned int n1;
476 for(;;) {
477 tb1 = *ptb;
478 n1 = (long)tb1 & 3;
479 tb1 = (TranslationBlock *)((long)tb1 & ~3);
480 if (tb1 == tb) {
481 *ptb = tb1->page_next[n1];
482 break;
484 ptb = &tb1->page_next[n1];
488 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
490 TranslationBlock *tb1, **ptb;
491 unsigned int n1;
493 ptb = &tb->jmp_next[n];
494 tb1 = *ptb;
495 if (tb1) {
496 /* find tb(n) in circular list */
497 for(;;) {
498 tb1 = *ptb;
499 n1 = (long)tb1 & 3;
500 tb1 = (TranslationBlock *)((long)tb1 & ~3);
501 if (n1 == n && tb1 == tb)
502 break;
503 if (n1 == 2) {
504 ptb = &tb1->jmp_first;
505 } else {
506 ptb = &tb1->jmp_next[n1];
509 /* now we can suppress tb(n) from the list */
510 *ptb = tb->jmp_next[n];
512 tb->jmp_next[n] = NULL;
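/* Sketch of the pointer-tagging scheme walked by tb_jmp_remove() and
   tb_page_remove() above: the low two bits of a stored TranslationBlock
   pointer are a tag, which works because the TranslationBlock objects
   in tbs[] are more than 4-byte aligned.  In the page lists (first_tb /
   page_next) a tag of 0 or 1 says which of the TB's two pages the link
   belongs to; in the jump lists (jmp_first / jmp_next) a tag of 0 or 1
   selects the jmp_next[] slot that holds the next link, and tag 2 marks
   the owning TB itself, i.e. the end of the circular list (see
   tb->jmp_first = (TranslationBlock *)((long)tb | 2) below). */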
516 /* reset the jump entry 'n' of a TB so that it is not chained to
517 another TB */
518 static inline void tb_reset_jump(TranslationBlock *tb, int n)
520 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
523 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
525 CPUState *env;
526 PageDesc *p;
527 unsigned int h, n1;
528 target_ulong phys_pc;
529 TranslationBlock *tb1, *tb2;
531 /* remove the TB from the hash list */
532 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
533 h = tb_phys_hash_func(phys_pc);
534 tb_remove(&tb_phys_hash[h], tb,
535 offsetof(TranslationBlock, phys_hash_next));
537 /* remove the TB from the page list */
538 if (tb->page_addr[0] != page_addr) {
539 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
540 tb_page_remove(&p->first_tb, tb);
541 invalidate_page_bitmap(p);
543 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
544 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
545 tb_page_remove(&p->first_tb, tb);
546 invalidate_page_bitmap(p);
549 tb_invalidated_flag = 1;
551 /* remove the TB from the hash list */
552 h = tb_jmp_cache_hash_func(tb->pc);
553 for(env = first_cpu; env != NULL; env = env->next_cpu) {
554 if (env->tb_jmp_cache[h] == tb)
555 env->tb_jmp_cache[h] = NULL;
558 /* suppress this TB from the two jump lists */
559 tb_jmp_remove(tb, 0);
560 tb_jmp_remove(tb, 1);
562 /* suppress any remaining jumps to this TB */
563 tb1 = tb->jmp_first;
564 for(;;) {
565 n1 = (long)tb1 & 3;
566 if (n1 == 2)
567 break;
568 tb1 = (TranslationBlock *)((long)tb1 & ~3);
569 tb2 = tb1->jmp_next[n1];
570 tb_reset_jump(tb1, n1);
571 tb1->jmp_next[n1] = NULL;
572 tb1 = tb2;
574 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
576 tb_phys_invalidate_count++;
579 static inline void set_bits(uint8_t *tab, int start, int len)
581 int end, mask, end1;
583 end = start + len;
584 tab += start >> 3;
585 mask = 0xff << (start & 7);
586 if ((start & ~7) == (end & ~7)) {
587 if (start < end) {
588 mask &= ~(0xff << (end & 7));
589 *tab |= mask;
591 } else {
592 *tab++ |= mask;
593 start = (start + 8) & ~7;
594 end1 = end & ~7;
595 while (start < end1) {
596 *tab++ = 0xff;
597 start += 8;
599 if (start < end) {
600 mask = ~(0xff << (end & 7));
601 *tab |= mask;
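/* Worked example for set_bits(): set_bits(tab, 3, 10) covers bits
   3..12, so it ORs 0xf8 into tab[0] (bits 3-7) and 0x1f into tab[1]
   (bits 8-12).  build_page_bitmap() below relies on this to mark, one
   bit per byte, the range of the page occupied by each TB. */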
606 static void build_page_bitmap(PageDesc *p)
608 int n, tb_start, tb_end;
609 TranslationBlock *tb;
611 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
612 if (!p->code_bitmap)
613 return;
614 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
616 tb = p->first_tb;
617 while (tb != NULL) {
618 n = (long)tb & 3;
619 tb = (TranslationBlock *)((long)tb & ~3);
620 /* NOTE: this is subtle as a TB may span two physical pages */
621 if (n == 0) {
622 /* NOTE: tb_end may be after the end of the page, but
623 it is not a problem */
624 tb_start = tb->pc & ~TARGET_PAGE_MASK;
625 tb_end = tb_start + tb->size;
626 if (tb_end > TARGET_PAGE_SIZE)
627 tb_end = TARGET_PAGE_SIZE;
628 } else {
629 tb_start = 0;
630 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
632 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
633 tb = tb->page_next[n];
637 #ifdef TARGET_HAS_PRECISE_SMC
639 static void tb_gen_code(CPUState *env,
640 target_ulong pc, target_ulong cs_base, int flags,
641 int cflags)
643 TranslationBlock *tb;
644 uint8_t *tc_ptr;
645 target_ulong phys_pc, phys_page2, virt_page2;
646 int code_gen_size;
648 phys_pc = get_phys_addr_code(env, pc);
649 tb = tb_alloc(pc);
650 if (!tb) {
651 /* flush must be done */
652 tb_flush(env);
653 /* cannot fail at this point */
654 tb = tb_alloc(pc);
656 tc_ptr = code_gen_ptr;
657 tb->tc_ptr = tc_ptr;
658 tb->cs_base = cs_base;
659 tb->flags = flags;
660 tb->cflags = cflags;
661 cpu_gen_code(env, tb, &code_gen_size);
662 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
664 /* check next page if needed */
665 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
666 phys_page2 = -1;
667 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
668 phys_page2 = get_phys_addr_code(env, virt_page2);
670 tb_link_phys(tb, phys_pc, phys_page2);
672 #endif
674 /* invalidate all TBs which intersect with the target physical page
675 starting in range [start;end[. NOTE: start and end must refer to
676 the same physical page. 'is_cpu_write_access' should be true if called
677 from a real cpu write access: the virtual CPU will exit the current
678 TB if code is modified inside this TB. */
679 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
680 int is_cpu_write_access)
682 int n, current_tb_modified, current_tb_not_found, current_flags;
683 CPUState *env = cpu_single_env;
684 PageDesc *p;
685 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
686 target_ulong tb_start, tb_end;
687 target_ulong current_pc, current_cs_base;
689 p = page_find(start >> TARGET_PAGE_BITS);
690 if (!p)
691 return;
692 if (!p->code_bitmap &&
693 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
694 is_cpu_write_access) {
695 /* build code bitmap */
696 build_page_bitmap(p);
699 /* we remove all the TBs in the range [start, end[ */
700 /* XXX: see if in some cases it could be faster to invalidate all the code */
701 current_tb_not_found = is_cpu_write_access;
702 current_tb_modified = 0;
703 current_tb = NULL; /* avoid warning */
704 current_pc = 0; /* avoid warning */
705 current_cs_base = 0; /* avoid warning */
706 current_flags = 0; /* avoid warning */
707 tb = p->first_tb;
708 while (tb != NULL) {
709 n = (long)tb & 3;
710 tb = (TranslationBlock *)((long)tb & ~3);
711 tb_next = tb->page_next[n];
712 /* NOTE: this is subtle as a TB may span two physical pages */
713 if (n == 0) {
714 /* NOTE: tb_end may be after the end of the page, but
715 it is not a problem */
716 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
717 tb_end = tb_start + tb->size;
718 } else {
719 tb_start = tb->page_addr[1];
720 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
722 if (!(tb_end <= start || tb_start >= end)) {
723 #ifdef TARGET_HAS_PRECISE_SMC
724 if (current_tb_not_found) {
725 current_tb_not_found = 0;
726 current_tb = NULL;
727 if (env->mem_write_pc) {
728 /* now we have a real cpu fault */
729 current_tb = tb_find_pc(env->mem_write_pc);
732 if (current_tb == tb &&
733 !(current_tb->cflags & CF_SINGLE_INSN)) {
734 /* If we are modifying the current TB, we must stop
735 its execution. We could be more precise by checking
736 that the modification is after the current PC, but it
737 would require a specialized function to partially
738 restore the CPU state */
740 current_tb_modified = 1;
741 cpu_restore_state(current_tb, env,
742 env->mem_write_pc, NULL);
743 #if defined(TARGET_I386)
744 current_flags = env->hflags;
745 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
746 current_cs_base = (target_ulong)env->segs[R_CS].base;
747 current_pc = current_cs_base + env->eip;
748 #else
749 #error unsupported CPU
750 #endif
752 #endif /* TARGET_HAS_PRECISE_SMC */
753 /* we need to do that to handle the case where a signal
754 occurs while doing tb_phys_invalidate() */
755 saved_tb = NULL;
756 if (env) {
757 saved_tb = env->current_tb;
758 env->current_tb = NULL;
760 tb_phys_invalidate(tb, -1);
761 if (env) {
762 env->current_tb = saved_tb;
763 if (env->interrupt_request && env->current_tb)
764 cpu_interrupt(env, env->interrupt_request);
767 tb = tb_next;
769 #if !defined(CONFIG_USER_ONLY)
770 /* if no code remaining, no need to continue to use slow writes */
771 if (!p->first_tb) {
772 invalidate_page_bitmap(p);
773 if (is_cpu_write_access) {
774 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
777 #endif
778 #ifdef TARGET_HAS_PRECISE_SMC
779 if (current_tb_modified) {
780 /* we generate a block containing just the instruction
781 modifying the memory. It will ensure that it cannot modify
782 itself */
783 env->current_tb = NULL;
784 tb_gen_code(env, current_pc, current_cs_base, current_flags,
785 CF_SINGLE_INSN);
786 cpu_resume_from_signal(env, NULL);
788 #endif
791 /* len must be <= 8 and start must be a multiple of len */
792 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
794 PageDesc *p;
795 int offset, b;
796 #if 0
797 if (1) {
798 if (loglevel) {
799 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
800 cpu_single_env->mem_write_vaddr, len,
801 cpu_single_env->eip,
802 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
805 #endif
806 p = page_find(start >> TARGET_PAGE_BITS);
807 if (!p)
808 return;
809 if (p->code_bitmap) {
810 offset = start & ~TARGET_PAGE_MASK;
811 b = p->code_bitmap[offset >> 3] >> (offset & 7);
812 if (b & ((1 << len) - 1))
813 goto do_invalidate;
814 } else {
815 do_invalidate:
816 tb_invalidate_phys_page_range(start, start + len, 1);
820 #if !defined(CONFIG_SOFTMMU)
821 static void tb_invalidate_phys_page(target_ulong addr,
822 unsigned long pc, void *puc)
824 int n, current_flags, current_tb_modified;
825 target_ulong current_pc, current_cs_base;
826 PageDesc *p;
827 TranslationBlock *tb, *current_tb;
828 #ifdef TARGET_HAS_PRECISE_SMC
829 CPUState *env = cpu_single_env;
830 #endif
832 addr &= TARGET_PAGE_MASK;
833 p = page_find(addr >> TARGET_PAGE_BITS);
834 if (!p)
835 return;
836 tb = p->first_tb;
837 current_tb_modified = 0;
838 current_tb = NULL;
839 current_pc = 0; /* avoid warning */
840 current_cs_base = 0; /* avoid warning */
841 current_flags = 0; /* avoid warning */
842 #ifdef TARGET_HAS_PRECISE_SMC
843 if (tb && pc != 0) {
844 current_tb = tb_find_pc(pc);
846 #endif
847 while (tb != NULL) {
848 n = (long)tb & 3;
849 tb = (TranslationBlock *)((long)tb & ~3);
850 #ifdef TARGET_HAS_PRECISE_SMC
851 if (current_tb == tb &&
852 !(current_tb->cflags & CF_SINGLE_INSN)) {
853 /* If we are modifying the current TB, we must stop
854 its execution. We could be more precise by checking
855 that the modification is after the current PC, but it
856 would require a specialized function to partially
857 restore the CPU state */
859 current_tb_modified = 1;
860 cpu_restore_state(current_tb, env, pc, puc);
861 #if defined(TARGET_I386)
862 current_flags = env->hflags;
863 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
864 current_cs_base = (target_ulong)env->segs[R_CS].base;
865 current_pc = current_cs_base + env->eip;
866 #else
867 #error unsupported CPU
868 #endif
870 #endif /* TARGET_HAS_PRECISE_SMC */
871 tb_phys_invalidate(tb, addr);
872 tb = tb->page_next[n];
874 p->first_tb = NULL;
875 #ifdef TARGET_HAS_PRECISE_SMC
876 if (current_tb_modified) {
877 /* we generate a block containing just the instruction
878 modifying the memory. It will ensure that it cannot modify
879 itself */
880 env->current_tb = NULL;
881 tb_gen_code(env, current_pc, current_cs_base, current_flags,
882 CF_SINGLE_INSN);
883 cpu_resume_from_signal(env, puc);
885 #endif
887 #endif
889 /* add the tb to the target page and protect it if necessary */
890 static inline void tb_alloc_page(TranslationBlock *tb,
891 unsigned int n, target_ulong page_addr)
893 PageDesc *p;
894 TranslationBlock *last_first_tb;
896 tb->page_addr[n] = page_addr;
897 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
898 tb->page_next[n] = p->first_tb;
899 last_first_tb = p->first_tb;
900 p->first_tb = (TranslationBlock *)((long)tb | n);
901 invalidate_page_bitmap(p);
903 #if defined(TARGET_HAS_SMC) || 1
905 #if defined(CONFIG_USER_ONLY)
906 if (p->flags & PAGE_WRITE) {
907 target_ulong addr;
908 PageDesc *p2;
909 int prot;
911 /* force the host page as non writable (writes will have a
912 page fault + mprotect overhead) */
913 page_addr &= qemu_host_page_mask;
914 prot = 0;
915 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
916 addr += TARGET_PAGE_SIZE) {
918 p2 = page_find (addr >> TARGET_PAGE_BITS);
919 if (!p2)
920 continue;
921 prot |= p2->flags;
922 p2->flags &= ~PAGE_WRITE;
923 page_get_flags(addr);
925 mprotect(g2h(page_addr), qemu_host_page_size,
926 (prot & PAGE_BITS) & ~PAGE_WRITE);
927 #ifdef DEBUG_TB_INVALIDATE
928 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
929 page_addr);
930 #endif
932 #else
933 /* if some code is already present, then the pages are already
934 protected. So we handle the case where only the first TB is
935 allocated in a physical page */
936 if (!last_first_tb) {
937 tlb_protect_code(page_addr);
939 #endif
941 #endif /* TARGET_HAS_SMC */
944 /* Allocate a new translation block. Flush the translation buffer if
945 too many translation blocks or too much generated code. */
946 TranslationBlock *tb_alloc(target_ulong pc)
948 TranslationBlock *tb;
950 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
951 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
952 return NULL;
953 tb = &tbs[nb_tbs++];
954 tb->pc = pc;
955 tb->cflags = 0;
956 return tb;
959 /* add a new TB and link it to the physical page tables. phys_page2 is
960 (-1) to indicate that only one page contains the TB. */
961 void tb_link_phys(TranslationBlock *tb,
962 target_ulong phys_pc, target_ulong phys_page2)
964 unsigned int h;
965 TranslationBlock **ptb;
967 /* add in the physical hash table */
968 h = tb_phys_hash_func(phys_pc);
969 ptb = &tb_phys_hash[h];
970 tb->phys_hash_next = *ptb;
971 *ptb = tb;
973 /* add in the page list */
974 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
975 if (phys_page2 != -1)
976 tb_alloc_page(tb, 1, phys_page2);
977 else
978 tb->page_addr[1] = -1;
980 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
981 tb->jmp_next[0] = NULL;
982 tb->jmp_next[1] = NULL;
984 /* init original jump addresses */
985 if (tb->tb_next_offset[0] != 0xffff)
986 tb_reset_jump(tb, 0);
987 if (tb->tb_next_offset[1] != 0xffff)
988 tb_reset_jump(tb, 1);
990 #ifdef DEBUG_TB_CHECK
991 tb_page_check();
992 #endif
995 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
996 tb[1].tc_ptr. Return NULL if not found */
997 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
999 int m_min, m_max, m;
1000 unsigned long v;
1001 TranslationBlock *tb;
1003 if (nb_tbs <= 0)
1004 return NULL;
1005 if (tc_ptr < (unsigned long)code_gen_buffer ||
1006 tc_ptr >= (unsigned long)code_gen_ptr)
1007 return NULL;
1008 /* binary search (cf Knuth) */
1009 m_min = 0;
1010 m_max = nb_tbs - 1;
1011 while (m_min <= m_max) {
1012 m = (m_min + m_max) >> 1;
1013 tb = &tbs[m];
1014 v = (unsigned long)tb->tc_ptr;
1015 if (v == tc_ptr)
1016 return tb;
1017 else if (tc_ptr < v) {
1018 m_max = m - 1;
1019 } else {
1020 m_min = m + 1;
1023 return &tbs[m_max];
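/* The binary search above works because, as far as this file shows,
   tb_alloc() hands out tbs[] entries in increasing order while
   code_gen_ptr only moves forward between flushes, so tc_ptr grows with
   the array index; when there is no exact match, tbs[m_max] is the last
   TB whose generated code starts at or before tc_ptr, i.e. the block
   that contains it. */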
1026 static void tb_reset_jump_recursive(TranslationBlock *tb);
1028 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1030 TranslationBlock *tb1, *tb_next, **ptb;
1031 unsigned int n1;
1033 tb1 = tb->jmp_next[n];
1034 if (tb1 != NULL) {
1035 /* find head of list */
1036 for(;;) {
1037 n1 = (long)tb1 & 3;
1038 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1039 if (n1 == 2)
1040 break;
1041 tb1 = tb1->jmp_next[n1];
1043 /* we are now sure that tb jumps to tb1 */
1044 tb_next = tb1;
1046 /* remove tb from the jmp_first list */
1047 ptb = &tb_next->jmp_first;
1048 for(;;) {
1049 tb1 = *ptb;
1050 n1 = (long)tb1 & 3;
1051 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1052 if (n1 == n && tb1 == tb)
1053 break;
1054 ptb = &tb1->jmp_next[n1];
1056 *ptb = tb->jmp_next[n];
1057 tb->jmp_next[n] = NULL;
1059 /* suppress the jump to next tb in generated code */
1060 tb_reset_jump(tb, n);
1062 /* suppress jumps in the tb on which we could have jumped */
1063 tb_reset_jump_recursive(tb_next);
1067 static void tb_reset_jump_recursive(TranslationBlock *tb)
1069 tb_reset_jump_recursive2(tb, 0);
1070 tb_reset_jump_recursive2(tb, 1);
1073 #if defined(TARGET_HAS_ICE)
1074 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1076 target_phys_addr_t addr;
1077 target_ulong pd;
1078 ram_addr_t ram_addr;
1079 PhysPageDesc *p;
1081 addr = cpu_get_phys_page_debug(env, pc);
1082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1083 if (!p) {
1084 pd = IO_MEM_UNASSIGNED;
1085 } else {
1086 pd = p->phys_offset;
1088 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1089 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1091 #endif
1093 /* Add a watchpoint. */
1094 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1096 int i;
1098 for (i = 0; i < env->nb_watchpoints; i++) {
1099 if (addr == env->watchpoint[i].vaddr)
1100 return 0;
1102 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1103 return -1;
1105 i = env->nb_watchpoints++;
1106 env->watchpoint[i].vaddr = addr;
1107 tlb_flush_page(env, addr);
1108 /* FIXME: This flush is needed because of the hack to make memory ops
1109 terminate the TB. It can be removed once the proper IO trap and
1110 re-execute bits are in. */
1111 tb_flush(env);
1112 return i;
1115 /* Remove a watchpoint. */
1116 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1118 int i;
1120 for (i = 0; i < env->nb_watchpoints; i++) {
1121 if (addr == env->watchpoint[i].vaddr) {
1122 env->nb_watchpoints--;
1123 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1124 tlb_flush_page(env, addr);
1125 return 0;
1128 return -1;
1131 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1132 breakpoint is reached */
1133 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1135 #if defined(TARGET_HAS_ICE)
1136 int i;
1138 for(i = 0; i < env->nb_breakpoints; i++) {
1139 if (env->breakpoints[i] == pc)
1140 return 0;
1143 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1144 return -1;
1145 env->breakpoints[env->nb_breakpoints++] = pc;
1147 #ifdef USE_KVM
1148 if (kvm_allowed)
1149 kvm_update_debugger(env);
1150 #endif
1152 breakpoint_invalidate(env, pc);
1153 return 0;
1154 #else
1155 return -1;
1156 #endif
1159 /* remove a breakpoint */
1160 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1162 #if defined(TARGET_HAS_ICE)
1163 int i;
1164 for(i = 0; i < env->nb_breakpoints; i++) {
1165 if (env->breakpoints[i] == pc)
1166 goto found;
1168 return -1;
1169 found:
1170 env->nb_breakpoints--;
1171 if (i < env->nb_breakpoints)
1172 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1174 #ifdef USE_KVM
1175 if (kvm_allowed)
1176 kvm_update_debugger(env);
1177 #endif
1179 breakpoint_invalidate(env, pc);
1180 return 0;
1181 #else
1182 return -1;
1183 #endif
1186 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1187 CPU loop after each instruction */
1188 void cpu_single_step(CPUState *env, int enabled)
1190 #if defined(TARGET_HAS_ICE)
1191 if (env->singlestep_enabled != enabled) {
1192 env->singlestep_enabled = enabled;
1193 /* must flush all the translated code to avoid inconsistencies */
1194 /* XXX: only flush what is necessary */
1195 tb_flush(env);
1197 #ifdef USE_KVM
1198 if (kvm_allowed)
1199 kvm_update_debugger(env);
1200 #endif
1201 #endif
1204 /* enable or disable low levels log */
1205 void cpu_set_log(int log_flags)
1207 loglevel = log_flags;
1208 if (loglevel && !logfile) {
1209 logfile = fopen(logfilename, log_append ? "a" : "w");
1210 if (!logfile) {
1211 perror(logfilename);
1212 _exit(1);
1214 #if !defined(CONFIG_SOFTMMU)
1215 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1217 static uint8_t logfile_buf[4096];
1218 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1220 #else
1221 setvbuf(logfile, NULL, _IOLBF, 0);
1222 #endif
1223 log_append = 1;
1225 if (!loglevel && logfile) {
1226 fclose(logfile);
1227 logfile = NULL;
1231 void cpu_set_log_filename(const char *filename)
1233 logfilename = strdup(filename);
1234 if (logfile) {
1235 fclose(logfile);
1236 logfile = NULL;
1238 cpu_set_log(loglevel);
1241 /* mask must never be zero, except for A20 change call */
1242 void cpu_interrupt(CPUState *env, int mask)
1244 TranslationBlock *tb;
1245 static int interrupt_lock;
1247 env->interrupt_request |= mask;
1248 #ifdef USE_KVM
1249 if (kvm_allowed && !kvm_irqchip_in_kernel(kvm_context))
1250 kvm_update_interrupt_request(env);
1251 #endif
1252 /* if the cpu is currently executing code, we must unlink it and
1253 all the potentially executing TB */
1254 tb = env->current_tb;
1255 if (tb && !testandset(&interrupt_lock)) {
1256 env->current_tb = NULL;
1257 tb_reset_jump_recursive(tb);
1258 interrupt_lock = 0;
1262 void cpu_reset_interrupt(CPUState *env, int mask)
1264 env->interrupt_request &= ~mask;
1267 CPULogItem cpu_log_items[] = {
1268 { CPU_LOG_TB_OUT_ASM, "out_asm",
1269 "show generated host assembly code for each compiled TB" },
1270 { CPU_LOG_TB_IN_ASM, "in_asm",
1271 "show target assembly code for each compiled TB" },
1272 { CPU_LOG_TB_OP, "op",
1273 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1274 #ifdef TARGET_I386
1275 { CPU_LOG_TB_OP_OPT, "op_opt",
1276 "show micro ops after optimization for each compiled TB" },
1277 #endif
1278 { CPU_LOG_INT, "int",
1279 "show interrupts/exceptions in short format" },
1280 { CPU_LOG_EXEC, "exec",
1281 "show trace before each executed TB (lots of logs)" },
1282 { CPU_LOG_TB_CPU, "cpu",
1283 "show CPU state before block translation" },
1284 #ifdef TARGET_I386
1285 { CPU_LOG_PCALL, "pcall",
1286 "show protected mode far calls/returns/exceptions" },
1287 #endif
1288 #ifdef DEBUG_IOPORT
1289 { CPU_LOG_IOPORT, "ioport",
1290 "show all i/o ports accesses" },
1291 #endif
1292 { 0, NULL, NULL },
1295 static int cmp1(const char *s1, int n, const char *s2)
1297 if (strlen(s2) != n)
1298 return 0;
1299 return memcmp(s1, s2, n) == 0;
1302 /* takes a comma-separated list of log masks. Returns 0 on error. */
1303 int cpu_str_to_log_mask(const char *str)
1305 CPULogItem *item;
1306 int mask;
1307 const char *p, *p1;
1309 p = str;
1310 mask = 0;
1311 for(;;) {
1312 p1 = strchr(p, ',');
1313 if (!p1)
1314 p1 = p + strlen(p);
1315 if(cmp1(p,p1-p,"all")) {
1316 for(item = cpu_log_items; item->mask != 0; item++) {
1317 mask |= item->mask;
1319 } else {
1320 for(item = cpu_log_items; item->mask != 0; item++) {
1321 if (cmp1(p, p1 - p, item->name))
1322 goto found;
1324 return 0;
1326 found:
1327 mask |= item->mask;
1328 if (*p1 != ',')
1329 break;
1330 p = p1 + 1;
1332 return mask;
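/* Usage sketch: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" ORs in every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */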
1335 void cpu_abort(CPUState *env, const char *fmt, ...)
1337 va_list ap;
1338 va_list ap2;
1340 va_start(ap, fmt);
1341 va_copy(ap2, ap);
1342 fprintf(stderr, "qemu: fatal: ");
1343 vfprintf(stderr, fmt, ap);
1344 fprintf(stderr, "\n");
1345 #ifdef TARGET_I386
1346 if(env->intercept & INTERCEPT_SVM_MASK) {
1347 /* most probably the virtual machine should not
1348 be shut down but rather caught by the VMM */
1349 vmexit(SVM_EXIT_SHUTDOWN, 0);
1351 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1352 #else
1353 cpu_dump_state(env, stderr, fprintf, 0);
1354 #endif
1355 if (logfile) {
1356 fprintf(logfile, "qemu: fatal: ");
1357 vfprintf(logfile, fmt, ap2);
1358 fprintf(logfile, "\n");
1359 #ifdef TARGET_I386
1360 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1361 #else
1362 cpu_dump_state(env, logfile, fprintf, 0);
1363 #endif
1364 fflush(logfile);
1365 fclose(logfile);
1367 va_end(ap2);
1368 va_end(ap);
1369 abort();
1372 CPUState *cpu_copy(CPUState *env)
1374 CPUState *new_env = cpu_init(env->cpu_model_str);
1375 /* preserve chaining and index */
1376 CPUState *next_cpu = new_env->next_cpu;
1377 int cpu_index = new_env->cpu_index;
1378 memcpy(new_env, env, sizeof(CPUState));
1379 new_env->next_cpu = next_cpu;
1380 new_env->cpu_index = cpu_index;
1381 return new_env;
1384 #if !defined(CONFIG_USER_ONLY)
1386 /* NOTE: if flush_global is true, also flush global entries (not
1387 implemented yet) */
1388 void tlb_flush(CPUState *env, int flush_global)
1390 int i;
1392 #if defined(DEBUG_TLB)
1393 printf("tlb_flush:\n");
1394 #endif
1395 /* must reset current TB so that interrupts cannot modify the
1396 links while we are modifying them */
1397 env->current_tb = NULL;
1399 for(i = 0; i < CPU_TLB_SIZE; i++) {
1400 env->tlb_table[0][i].addr_read = -1;
1401 env->tlb_table[0][i].addr_write = -1;
1402 env->tlb_table[0][i].addr_code = -1;
1403 env->tlb_table[1][i].addr_read = -1;
1404 env->tlb_table[1][i].addr_write = -1;
1405 env->tlb_table[1][i].addr_code = -1;
1406 #if (NB_MMU_MODES >= 3)
1407 env->tlb_table[2][i].addr_read = -1;
1408 env->tlb_table[2][i].addr_write = -1;
1409 env->tlb_table[2][i].addr_code = -1;
1410 #if (NB_MMU_MODES == 4)
1411 env->tlb_table[3][i].addr_read = -1;
1412 env->tlb_table[3][i].addr_write = -1;
1413 env->tlb_table[3][i].addr_code = -1;
1414 #endif
1415 #endif
1418 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1420 #if !defined(CONFIG_SOFTMMU)
1421 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1422 #endif
1423 #ifdef USE_KQEMU
1424 if (env->kqemu_enabled) {
1425 kqemu_flush(env, flush_global);
1427 #endif
1428 tlb_flush_count++;
1431 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1433 if (addr == (tlb_entry->addr_read &
1434 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1435 addr == (tlb_entry->addr_write &
1436 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1437 addr == (tlb_entry->addr_code &
1438 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1439 tlb_entry->addr_read = -1;
1440 tlb_entry->addr_write = -1;
1441 tlb_entry->addr_code = -1;
1445 void tlb_flush_page(CPUState *env, target_ulong addr)
1447 int i;
1448 TranslationBlock *tb;
1450 #if defined(DEBUG_TLB)
1451 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1452 #endif
1453 /* must reset current TB so that interrupts cannot modify the
1454 links while we are modifying them */
1455 env->current_tb = NULL;
1457 addr &= TARGET_PAGE_MASK;
1458 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1459 tlb_flush_entry(&env->tlb_table[0][i], addr);
1460 tlb_flush_entry(&env->tlb_table[1][i], addr);
1461 #if (NB_MMU_MODES >= 3)
1462 tlb_flush_entry(&env->tlb_table[2][i], addr);
1463 #if (NB_MMU_MODES == 4)
1464 tlb_flush_entry(&env->tlb_table[3][i], addr);
1465 #endif
1466 #endif
1468 /* Discard jump cache entries for any tb which might potentially
1469 overlap the flushed page. */
1470 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1471 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1473 i = tb_jmp_cache_hash_page(addr);
1474 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1476 #if !defined(CONFIG_SOFTMMU)
1477 if (addr < MMAP_AREA_END)
1478 munmap((void *)addr, TARGET_PAGE_SIZE);
1479 #endif
1480 #ifdef USE_KQEMU
1481 if (env->kqemu_enabled) {
1482 kqemu_flush_page(env, addr);
1484 #endif
1487 /* update the TLBs so that writes to code in the virtual page 'addr'
1488 can be detected */
1489 static void tlb_protect_code(ram_addr_t ram_addr)
1491 cpu_physical_memory_reset_dirty(ram_addr,
1492 ram_addr + TARGET_PAGE_SIZE,
1493 CODE_DIRTY_FLAG);
1496 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1497 tested for self modifying code */
1498 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1499 target_ulong vaddr)
1501 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1504 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1505 unsigned long start, unsigned long length)
1507 unsigned long addr;
1508 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1509 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1510 if ((addr - start) < length) {
1511 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1516 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1517 int dirty_flags)
1519 CPUState *env;
1520 unsigned long length, start1;
1521 int i, mask, len;
1522 uint8_t *p;
1524 start &= TARGET_PAGE_MASK;
1525 end = TARGET_PAGE_ALIGN(end);
1527 length = end - start;
1528 if (length == 0)
1529 return;
1530 len = length >> TARGET_PAGE_BITS;
1531 #ifdef USE_KQEMU
1532 /* XXX: should not depend on cpu context */
1533 env = first_cpu;
1534 if (env->kqemu_enabled) {
1535 ram_addr_t addr;
1536 addr = start;
1537 for(i = 0; i < len; i++) {
1538 kqemu_set_notdirty(env, addr);
1539 addr += TARGET_PAGE_SIZE;
1542 #endif
1543 mask = ~dirty_flags;
1544 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1545 for(i = 0; i < len; i++)
1546 p[i] &= mask;
1548 /* we modify the TLB cache so that the dirty bit will be set again
1549 when accessing the range */
1550 start1 = start + (unsigned long)phys_ram_base;
1551 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1552 for(i = 0; i < CPU_TLB_SIZE; i++)
1553 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1554 for(i = 0; i < CPU_TLB_SIZE; i++)
1555 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1556 #if (NB_MMU_MODES >= 3)
1557 for(i = 0; i < CPU_TLB_SIZE; i++)
1558 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1559 #if (NB_MMU_MODES == 4)
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1562 #endif
1563 #endif
1566 #if !defined(CONFIG_SOFTMMU)
1567 /* XXX: this is expensive */
1569 VirtPageDesc *p;
1570 int j;
1571 target_ulong addr;
1573 for(i = 0; i < L1_SIZE; i++) {
1574 p = l1_virt_map[i];
1575 if (p) {
1576 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1577 for(j = 0; j < L2_SIZE; j++) {
1578 if (p->valid_tag == virt_valid_tag &&
1579 p->phys_addr >= start && p->phys_addr < end &&
1580 (p->prot & PROT_WRITE)) {
1581 if (addr < MMAP_AREA_END) {
1582 mprotect((void *)addr, TARGET_PAGE_SIZE,
1583 p->prot & ~PROT_WRITE);
1586 addr += TARGET_PAGE_SIZE;
1587 p++;
1592 #endif
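/* Overview of the dirty-tracking round trip implemented by the helpers
   above and below (a sketch; the store fast path itself is generated
   code, not in this file): clearing a page's dirty flags here also
   rewrites matching TLB write entries to IO_MEM_NOTDIRTY, so the next
   guest store to that page is forced through the notdirty_mem_write*
   handlers, which invalidate any translated code on the page and set
   the dirty flags again; tlb_set_dirty() below exists to flip the entry
   back to plain IO_MEM_RAM once the page is dirty again. */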
1595 int cpu_physical_memory_set_dirty_tracking(int enable)
1597 int r=0;
1599 #ifdef USE_KVM
1600 r = kvm_physical_memory_set_dirty_tracking(enable);
1601 #endif
1602 in_migration = enable;
1603 return r;
1606 int cpu_physical_memory_get_dirty_tracking(void)
1608 return in_migration;
1611 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1613 ram_addr_t ram_addr;
1615 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1616 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1617 tlb_entry->addend - (unsigned long)phys_ram_base;
1618 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1619 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1624 /* update the TLB according to the current state of the dirty bits */
1625 void cpu_tlb_update_dirty(CPUState *env)
1627 int i;
1628 for(i = 0; i < CPU_TLB_SIZE; i++)
1629 tlb_update_dirty(&env->tlb_table[0][i]);
1630 for(i = 0; i < CPU_TLB_SIZE; i++)
1631 tlb_update_dirty(&env->tlb_table[1][i]);
1632 #if (NB_MMU_MODES >= 3)
1633 for(i = 0; i < CPU_TLB_SIZE; i++)
1634 tlb_update_dirty(&env->tlb_table[2][i]);
1635 #if (NB_MMU_MODES == 4)
1636 for(i = 0; i < CPU_TLB_SIZE; i++)
1637 tlb_update_dirty(&env->tlb_table[3][i]);
1638 #endif
1639 #endif
1642 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1643 unsigned long start)
1645 unsigned long addr;
1646 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1647 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1648 if (addr == start) {
1649 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1654 /* update the TLB corresponding to virtual page vaddr and phys addr
1655 addr so that it is no longer dirty */
1656 static inline void tlb_set_dirty(CPUState *env,
1657 unsigned long addr, target_ulong vaddr)
1659 int i;
1661 addr &= TARGET_PAGE_MASK;
1662 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1663 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1664 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1665 #if (NB_MMU_MODES >= 3)
1666 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1667 #if (NB_MMU_MODES == 4)
1668 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1669 #endif
1670 #endif
1673 /* add a new TLB entry. At most one entry for a given virtual address
1674 is permitted. Return 0 if OK or 2 if the page could not be mapped
1675 (can only happen in non SOFTMMU mode for I/O pages or pages
1676 conflicting with the host address space). */
1677 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1678 target_phys_addr_t paddr, int prot,
1679 int mmu_idx, int is_softmmu)
1681 PhysPageDesc *p;
1682 unsigned long pd;
1683 unsigned int index;
1684 target_ulong address;
1685 target_phys_addr_t addend;
1686 int ret;
1687 CPUTLBEntry *te;
1688 int i;
1690 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1691 if (!p) {
1692 pd = IO_MEM_UNASSIGNED;
1693 } else {
1694 pd = p->phys_offset;
1696 #if defined(DEBUG_TLB)
1697 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1698 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1699 #endif
1701 ret = 0;
1702 #if !defined(CONFIG_SOFTMMU)
1703 if (is_softmmu)
1704 #endif
1706 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1707 /* IO memory case */
1708 address = vaddr | pd;
1709 addend = paddr;
1710 } else {
1711 /* standard memory */
1712 address = vaddr;
1713 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1716 /* Make accesses to pages with watchpoints go via the
1717 watchpoint trap routines. */
1718 for (i = 0; i < env->nb_watchpoints; i++) {
1719 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1720 if (address & ~TARGET_PAGE_MASK) {
1721 env->watchpoint[i].addend = 0;
1722 address = vaddr | io_mem_watch;
1723 } else {
1724 env->watchpoint[i].addend = pd - paddr +
1725 (unsigned long) phys_ram_base;
1726 /* TODO: Figure out how to make read watchpoints coexist
1727 with code. */
1728 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1733 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1734 addend -= vaddr;
1735 te = &env->tlb_table[mmu_idx][index];
1736 te->addend = addend;
1737 if (prot & PAGE_READ) {
1738 te->addr_read = address;
1739 } else {
1740 te->addr_read = -1;
1742 if (prot & PAGE_EXEC) {
1743 te->addr_code = address;
1744 } else {
1745 te->addr_code = -1;
1747 if (prot & PAGE_WRITE) {
1748 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1749 (pd & IO_MEM_ROMD)) {
1750 /* write access calls the I/O callback */
1751 te->addr_write = vaddr |
1752 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1753 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1754 !cpu_physical_memory_is_dirty(pd)) {
1755 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1756 } else {
1757 te->addr_write = address;
1759 } else {
1760 te->addr_write = -1;
1763 #if !defined(CONFIG_SOFTMMU)
1764 else {
1765 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1766 /* IO access: no mapping is done as it will be handled by the
1767 soft MMU */
1768 if (!(env->hflags & HF_SOFTMMU_MASK))
1769 ret = 2;
1770 } else {
1771 void *map_addr;
1773 if (vaddr >= MMAP_AREA_END) {
1774 ret = 2;
1775 } else {
1776 if (prot & PROT_WRITE) {
1777 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1778 #if defined(TARGET_HAS_SMC) || 1
1779 first_tb ||
1780 #endif
1781 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1782 !cpu_physical_memory_is_dirty(pd))) {
1783 /* ROM: we do as if code was inside */
1784 /* if code is present, we only map as read only and save the
1785 original mapping */
1786 VirtPageDesc *vp;
1788 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1789 vp->phys_addr = pd;
1790 vp->prot = prot;
1791 vp->valid_tag = virt_valid_tag;
1792 prot &= ~PAGE_WRITE;
1795 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1796 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1797 if (map_addr == MAP_FAILED) {
1798 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1799 paddr, vaddr);
1804 #endif
1805 return ret;
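/* Sketch of how the TLB entry filled in above is consumed (the load/
   store fast path itself is not in this file): for an ordinary RAM page
   te->addend ends up as phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr,
   so a hit only has to compute host_addr = guest_vaddr + te->addend;
   for I/O or not-dirty pages the extra low bits kept in addr_read /
   addr_write / addr_code make the page-aligned comparison fail, pushing
   the access onto the slow path and the io_mem_* handlers. */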
1808 /* called from signal handler: invalidate the code and unprotect the
1809 page. Return TRUE if the fault was successfully handled. */
1810 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1812 #if !defined(CONFIG_SOFTMMU)
1813 VirtPageDesc *vp;
1815 #if defined(DEBUG_TLB)
1816 printf("page_unprotect: addr=0x%08x\n", addr);
1817 #endif
1818 addr &= TARGET_PAGE_MASK;
1820 /* if it is not mapped, no need to worry here */
1821 if (addr >= MMAP_AREA_END)
1822 return 0;
1823 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1824 if (!vp)
1825 return 0;
1826 /* NOTE: in this case, validate_tag is _not_ tested as it
1827 validates only the code TLB */
1828 if (vp->valid_tag != virt_valid_tag)
1829 return 0;
1830 if (!(vp->prot & PAGE_WRITE))
1831 return 0;
1832 #if defined(DEBUG_TLB)
1833 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1834 addr, vp->phys_addr, vp->prot);
1835 #endif
1836 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1837 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1838 (unsigned long)addr, vp->prot);
1839 /* set the dirty bit */
1840 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1841 /* flush the code inside */
1842 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1843 return 1;
1844 #else
1845 return 0;
1846 #endif
1849 #else
1851 void tlb_flush(CPUState *env, int flush_global)
1855 void tlb_flush_page(CPUState *env, target_ulong addr)
1859 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1860 target_phys_addr_t paddr, int prot,
1861 int mmu_idx, int is_softmmu)
1863 return 0;
1866 /* dump memory mappings */
1867 void page_dump(FILE *f)
1869 unsigned long start, end;
1870 int i, j, prot, prot1;
1871 PageDesc *p;
1873 fprintf(f, "%-8s %-8s %-8s %s\n",
1874 "start", "end", "size", "prot");
1875 start = -1;
1876 end = -1;
1877 prot = 0;
1878 for(i = 0; i <= L1_SIZE; i++) {
1879 if (i < L1_SIZE)
1880 p = l1_map[i];
1881 else
1882 p = NULL;
1883 for(j = 0;j < L2_SIZE; j++) {
1884 if (!p)
1885 prot1 = 0;
1886 else
1887 prot1 = p[j].flags;
1888 if (prot1 != prot) {
1889 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1890 if (start != -1) {
1891 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1892 start, end, end - start,
1893 prot & PAGE_READ ? 'r' : '-',
1894 prot & PAGE_WRITE ? 'w' : '-',
1895 prot & PAGE_EXEC ? 'x' : '-');
1897 if (prot1 != 0)
1898 start = end;
1899 else
1900 start = -1;
1901 prot = prot1;
1903 if (!p)
1904 break;
1909 int page_get_flags(target_ulong address)
1911 PageDesc *p;
1913 p = page_find(address >> TARGET_PAGE_BITS);
1914 if (!p)
1915 return 0;
1916 return p->flags;
1919 /* modify the flags of a page and invalidate the code if
1920 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1921 depending on PAGE_WRITE */
1922 void page_set_flags(target_ulong start, target_ulong end, int flags)
1924 PageDesc *p;
1925 target_ulong addr;
1927 start = start & TARGET_PAGE_MASK;
1928 end = TARGET_PAGE_ALIGN(end);
1929 if (flags & PAGE_WRITE)
1930 flags |= PAGE_WRITE_ORG;
1931 spin_lock(&tb_lock);
1932 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1933 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1934 /* if the write protection is set, then we invalidate the code
1935 inside */
1936 if (!(p->flags & PAGE_WRITE) &&
1937 (flags & PAGE_WRITE) &&
1938 p->first_tb) {
1939 tb_invalidate_phys_page(addr, 0, NULL);
1941 p->flags = flags;
1943 spin_unlock(&tb_lock);
1946 int page_check_range(target_ulong start, target_ulong len, int flags)
1948 PageDesc *p;
1949 target_ulong end;
1950 target_ulong addr;
1952 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1953 start = start & TARGET_PAGE_MASK;
1955 if( end < start )
1956 /* we've wrapped around */
1957 return -1;
1958 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1959 p = page_find(addr >> TARGET_PAGE_BITS);
1960 if( !p )
1961 return -1;
1962 if( !(p->flags & PAGE_VALID) )
1963 return -1;
1965 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1966 return -1;
1967 if (flags & PAGE_WRITE) {
1968 if (!(p->flags & PAGE_WRITE_ORG))
1969 return -1;
1970 /* unprotect the page if it was put read-only because it
1971 contains translated code */
1972 if (!(p->flags & PAGE_WRITE)) {
1973 if (!page_unprotect(addr, 0, NULL))
1974 return -1;
1976 return 0;
1979 return 0;
1982 /* called from signal handler: invalidate the code and unprotect the
1983 page. Return TRUE if the fault was successfully handled. */
1984 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1986 unsigned int page_index, prot, pindex;
1987 PageDesc *p, *p1;
1988 target_ulong host_start, host_end, addr;
1990 host_start = address & qemu_host_page_mask;
1991 page_index = host_start >> TARGET_PAGE_BITS;
1992 p1 = page_find(page_index);
1993 if (!p1)
1994 return 0;
1995 host_end = host_start + qemu_host_page_size;
1996 p = p1;
1997 prot = 0;
1998 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1999 prot |= p->flags;
2000 p++;
2002 /* if the page was really writable, then we change its
2003 protection back to writable */
2004 if (prot & PAGE_WRITE_ORG) {
2005 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2006 if (!(p1[pindex].flags & PAGE_WRITE)) {
2007 mprotect((void *)g2h(host_start), qemu_host_page_size,
2008 (prot & PAGE_BITS) | PAGE_WRITE);
2009 p1[pindex].flags |= PAGE_WRITE;
2010 /* and since the content will be modified, we must invalidate
2011 the corresponding translated code. */
2012 tb_invalidate_phys_page(address, pc, puc);
2013 #ifdef DEBUG_TB_CHECK
2014 tb_invalidate_check(address);
2015 #endif
2016 return 1;
2019 return 0;
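/* Sketch of the user-mode SMC protection cycle that ties the pieces in
   this branch together: tb_alloc_page() mprotect()s host pages holding
   translated code to read-only, a later guest store faults, the signal
   handler ends up here, and page_unprotect() restores PROT_WRITE, sets
   PAGE_WRITE again and invalidates the TBs on the page via
   tb_invalidate_phys_page(), so the faulting store can be retried. */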
2022 static inline void tlb_set_dirty(CPUState *env,
2023 unsigned long addr, target_ulong vaddr)
2026 #endif /* defined(CONFIG_USER_ONLY) */
2028 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2029 int memory);
2030 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2031 int orig_memory);
2032 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2033 need_subpage) \
2034 do { \
2035 if (addr > start_addr) \
2036 start_addr2 = 0; \
2037 else { \
2038 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2039 if (start_addr2 > 0) \
2040 need_subpage = 1; \
2043 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2044 end_addr2 = TARGET_PAGE_SIZE - 1; \
2045 else { \
2046 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2047 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2048 need_subpage = 1; \
2050 } while (0)
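/* Worked example of CHECK_SUBPAGE (assuming TARGET_PAGE_SIZE == 0x1000):
   registering orig_size = 0x1800 bytes at start_addr = 0x1000800 runs
   the macro on the first loop iteration with addr == start_addr, giving
   start_addr2 = 0x800, end_addr2 = 0xfff and need_subpage = 1, so only
   offsets 0x800-0xfff of that page are redirected to the new handler
   via subpage_register(). */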
2052 /* register physical memory. 'size' must be a multiple of the target
2053 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2054 io memory page */
2055 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2056 unsigned long size,
2057 unsigned long phys_offset)
2059 target_phys_addr_t addr, end_addr;
2060 PhysPageDesc *p;
2061 CPUState *env;
2062 unsigned long orig_size = size;
2063 void *subpage;
2065 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2066 end_addr = start_addr + (target_phys_addr_t)size;
2067 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2068 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2069 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2070 unsigned long orig_memory = p->phys_offset;
2071 target_phys_addr_t start_addr2, end_addr2;
2072 int need_subpage = 0;
2074 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2075 need_subpage);
2076 if (need_subpage) {
2077 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2078 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2079 &p->phys_offset, orig_memory);
2080 } else {
2081 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2082 >> IO_MEM_SHIFT];
2084 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2085 } else {
2086 p->phys_offset = phys_offset;
2087 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2088 (phys_offset & IO_MEM_ROMD))
2089 phys_offset += TARGET_PAGE_SIZE;
2091 } else {
2092 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2093 p->phys_offset = phys_offset;
2094 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2095 (phys_offset & IO_MEM_ROMD))
2096 phys_offset += TARGET_PAGE_SIZE;
2097 else {
2098 target_phys_addr_t start_addr2, end_addr2;
2099 int need_subpage = 0;
2101 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2102 end_addr2, need_subpage);
2104 if (need_subpage) {
2105 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2106 &p->phys_offset, IO_MEM_UNASSIGNED);
2107 subpage_register(subpage, start_addr2, end_addr2,
2108 phys_offset);
2114 /* since each CPU stores ram addresses in its TLB cache, we must
2115 reset the modified entries */
2116 /* XXX: slow! */
2117 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2118 tlb_flush(env, 1);
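/* Illustrative sketch (not part of the build): how board code of this era
   typically pairs qemu_ram_alloc() (defined below) with
   cpu_register_physical_memory().  The sizes and physical addresses are
   made up for the example. */
#if 0
static void example_board_init(ram_addr_t ram_size)
{
    ram_addr_t ram_offset, rom_offset;

    /* plain RAM at physical address 0 */
    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    /* a 64 KB ROM mapped high; with IO_MEM_ROM, guest writes are simply
       discarded while reads go to the backing RAM copy */
    rom_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0xffff0000, 0x10000,
                                 rom_offset | IO_MEM_ROM);
}
#endif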
2122 /* XXX: temporary until new memory mapping API */
2123 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2125 PhysPageDesc *p;
2127 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2128 if (!p)
2129 return IO_MEM_UNASSIGNED;
2130 return p->phys_offset;
2133 /* XXX: better than nothing */
2134 ram_addr_t qemu_ram_alloc(unsigned long size)
2136 ram_addr_t addr;
2137 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2138 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
2139 size, (unsigned long)phys_ram_size);
2140 abort();
2142 addr = phys_ram_alloc_offset;
2143 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2144 return addr;
2147 void qemu_ram_free(ram_addr_t addr)
2151 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2153 #ifdef DEBUG_UNASSIGNED
2154 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2155 #endif
2156 #ifdef TARGET_SPARC
2157 do_unassigned_access(addr, 0, 0, 0);
2158 #elif TARGET_CRIS
2159 do_unassigned_access(addr, 0, 0, 0);
2160 #endif
2161 return 0;
2164 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2166 #ifdef DEBUG_UNASSIGNED
2167 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2168 #endif
2169 #ifdef TARGET_SPARC
2170 do_unassigned_access(addr, 1, 0, 0);
2171 #elif TARGET_CRIS
2172 do_unassigned_access(addr, 1, 0, 0);
2173 #endif
2176 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2177 unassigned_mem_readb,
2178 unassigned_mem_readb,
2179 unassigned_mem_readb,
2182 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2183 unassigned_mem_writeb,
2184 unassigned_mem_writeb,
2185 unassigned_mem_writeb,
2188 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2190 unsigned long ram_addr;
2191 int dirty_flags;
2192 ram_addr = addr - (unsigned long)phys_ram_base;
2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2194 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2195 #if !defined(CONFIG_USER_ONLY)
2196 tb_invalidate_phys_page_fast(ram_addr, 1);
2197 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2198 #endif
2200 stb_p((uint8_t *)(long)addr, val);
2201 #ifdef USE_KQEMU
2202 if (cpu_single_env->kqemu_enabled &&
2203 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2204 kqemu_modify_page(cpu_single_env, ram_addr);
2205 #endif
2206 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2207 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2208 /* we remove the notdirty callback only if the code has been
2209 flushed */
2210 if (dirty_flags == 0xff)
2211 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2214 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2216 unsigned long ram_addr;
2217 int dirty_flags;
2218 ram_addr = addr - (unsigned long)phys_ram_base;
2219 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2220 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2221 #if !defined(CONFIG_USER_ONLY)
2222 tb_invalidate_phys_page_fast(ram_addr, 2);
2223 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2224 #endif
2226 stw_p((uint8_t *)(long)addr, val);
2227 #ifdef USE_KQEMU
2228 if (cpu_single_env->kqemu_enabled &&
2229 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2230 kqemu_modify_page(cpu_single_env, ram_addr);
2231 #endif
2232 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2233 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2234 /* we remove the notdirty callback only if the code has been
2235 flushed */
2236 if (dirty_flags == 0xff)
2237 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2240 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2242 unsigned long ram_addr;
2243 int dirty_flags;
2244 ram_addr = addr - (unsigned long)phys_ram_base;
2245 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2246 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2247 #if !defined(CONFIG_USER_ONLY)
2248 tb_invalidate_phys_page_fast(ram_addr, 4);
2249 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2250 #endif
2252 stl_p((uint8_t *)(long)addr, val);
2253 #ifdef USE_KQEMU
2254 if (cpu_single_env->kqemu_enabled &&
2255 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2256 kqemu_modify_page(cpu_single_env, ram_addr);
2257 #endif
2258 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2259 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2260 /* we remove the notdirty callback only if the code has been
2261 flushed */
2262 if (dirty_flags == 0xff)
2263 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2266 static CPUReadMemoryFunc *error_mem_read[3] = {
2267 NULL, /* never used */
2268 NULL, /* never used */
2269 NULL, /* never used */
2272 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2273 notdirty_mem_writeb,
2274 notdirty_mem_writew,
2275 notdirty_mem_writel,
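/* Note on the dirty bitmap used by the notdirty handlers above (descriptive,
   based on the flags visible in this file): phys_ram_dirty[] holds one byte
   per target page, and 0xff means "fully dirty".  Individual bits belong to
   different clients (CODE_DIRTY_FLAG for the TB cache here, plus the VGA,
   migration and kqemu trackers elsewhere).  The TLB fill code points stores
   at IO_MEM_NOTDIRTY whenever a RAM page is not fully dirty; the handlers
   then invalidate any TBs on the page, set every bit except CODE_DIRTY_FLAG,
   and once the byte reaches 0xff (i.e. the translated code is gone) call
   tlb_set_dirty() so later stores take the fast RAM path again. */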
2278 #if defined(CONFIG_SOFTMMU)
2279 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2280 so these check for a hit then pass through to the normal out-of-line
2281 phys routines. */
2282 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2284 return ldub_phys(addr);
2287 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2289 return lduw_phys(addr);
2292 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2294 return ldl_phys(addr);
2297 /* Generate a debug exception if a watchpoint has been hit.
2298 Returns the real physical address of the access. addr will be a host
2299 address in case of a RAM location. */
2300 static target_ulong check_watchpoint(target_phys_addr_t addr)
2302 CPUState *env = cpu_single_env;
2303 target_ulong watch;
2304 target_ulong retaddr;
2305 int i;
2307 retaddr = addr;
2308 for (i = 0; i < env->nb_watchpoints; i++) {
2309 watch = env->watchpoint[i].vaddr;
2310 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2311 retaddr = addr - env->watchpoint[i].addend;
2312 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2313 cpu_single_env->watchpoint_hit = i + 1;
2314 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2315 break;
2319 return retaddr;
2322 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2323 uint32_t val)
2325 addr = check_watchpoint(addr);
2326 stb_phys(addr, val);
2329 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2330 uint32_t val)
2332 addr = check_watchpoint(addr);
2333 stw_phys(addr, val);
2336 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2337 uint32_t val)
2339 addr = check_watchpoint(addr);
2340 stl_phys(addr, val);
2343 static CPUReadMemoryFunc *watch_mem_read[3] = {
2344 watch_mem_readb,
2345 watch_mem_readw,
2346 watch_mem_readl,
2349 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2350 watch_mem_writeb,
2351 watch_mem_writew,
2352 watch_mem_writel,
2354 #endif
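/* Illustrative sketch (not part of the build): how the watchpoint I/O slot
   above is typically exercised.  The cpu_watchpoint_insert() helper and its
   exact signature are assumed for the example; the point is that once a
   watchpoint exists on a page, TLB fills for that page are redirected to
   io_mem_watch so every access funnels through check_watchpoint(). */
#if 0
static void example_set_data_watchpoint(CPUState *env, target_ulong vaddr)
{
    /* assumed helper: records vaddr in env->watchpoint[] and flushes the
       TLB so the page is refilled via io_mem_watch */
    cpu_watchpoint_insert(env, vaddr);

    /* From now on a guest access to vaddr reaches watch_mem_read*()/
       watch_mem_write*(), check_watchpoint() raises CPU_INTERRUPT_DEBUG,
       and the debug front end (e.g. the gdb stub) sees
       env->watchpoint_hit set. */
}
#endif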
2356 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2357 unsigned int len)
2359 CPUReadMemoryFunc **mem_read;
2360 uint32_t ret;
2361 unsigned int idx;
2363 idx = SUBPAGE_IDX(addr - mmio->base);
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2366 mmio, len, addr, idx);
2367 #endif
2368 mem_read = mmio->mem_read[idx];
2369 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2371 return ret;
2374 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2375 uint32_t value, unsigned int len)
2377 CPUWriteMemoryFunc **mem_write;
2378 unsigned int idx;
2380 idx = SUBPAGE_IDX(addr - mmio->base);
2381 #if defined(DEBUG_SUBPAGE)
2382 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2383 mmio, len, addr, idx, value);
2384 #endif
2385 mem_write = mmio->mem_write[idx];
2386 (*mem_write[len])(mmio->opaque[idx], addr, value);
2389 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2391 #if defined(DEBUG_SUBPAGE)
2392 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2393 #endif
2395 return subpage_readlen(opaque, addr, 0);
2398 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2399 uint32_t value)
2401 #if defined(DEBUG_SUBPAGE)
2402 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2403 #endif
2404 subpage_writelen(opaque, addr, value, 0);
2407 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2409 #if defined(DEBUG_SUBPAGE)
2410 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2411 #endif
2413 return subpage_readlen(opaque, addr, 1);
2416 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2417 uint32_t value)
2419 #if defined(DEBUG_SUBPAGE)
2420 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2421 #endif
2422 subpage_writelen(opaque, addr, value, 1);
2425 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2427 #if defined(DEBUG_SUBPAGE)
2428 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2429 #endif
2431 return subpage_readlen(opaque, addr, 2);
2434 static void subpage_writel (void *opaque,
2435 target_phys_addr_t addr, uint32_t value)
2437 #if defined(DEBUG_SUBPAGE)
2438 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2439 #endif
2440 subpage_writelen(opaque, addr, value, 2);
2443 static CPUReadMemoryFunc *subpage_read[] = {
2444 &subpage_readb,
2445 &subpage_readw,
2446 &subpage_readl,
2449 static CPUWriteMemoryFunc *subpage_write[] = {
2450 &subpage_writeb,
2451 &subpage_writew,
2452 &subpage_writel,
2455 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2456 int memory)
2458 int idx, eidx;
2460 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2461 return -1;
2462 idx = SUBPAGE_IDX(start);
2463 eidx = SUBPAGE_IDX(end);
2464 #if defined(DEBUG_SUBPAGE)
2465 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2466 mmio, start, end, idx, eidx, memory);
2467 #endif
2468 memory >>= IO_MEM_SHIFT;
2469 for (; idx <= eidx; idx++) {
2470 mmio->mem_read[idx] = io_mem_read[memory];
2471 mmio->mem_write[idx] = io_mem_write[memory];
2472 mmio->opaque[idx] = io_mem_opaque[memory];
2475 return 0;
2478 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2479 int orig_memory)
2481 subpage_t *mmio;
2482 int subpage_memory;
2484 mmio = qemu_mallocz(sizeof(subpage_t));
2485 if (mmio != NULL) {
2486 mmio->base = base;
2487 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2488 #if defined(DEBUG_SUBPAGE)
2489 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2490 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2491 #endif
2492 *phys = subpage_memory | IO_MEM_SUBPAGE;
2493 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2496 return mmio;
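/* Worked example for the subpage machinery (descriptive): when
   cpu_register_physical_memory() above finds that a new region covers only
   part of a page, subpage_init() allocates a subpage_t, registers it as a
   fresh I/O slot tagged with IO_MEM_SUBPAGE, and pre-fills every index with
   the page's previous handlers; subpage_register() then overrides just the
   byte range of the new region.  Later accesses to that page are dispatched
   per offset by subpage_readlen()/subpage_writelen(). */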
2499 static void io_mem_init(void)
2501 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2502 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2503 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2504 io_mem_nb = 5;
2506 #if defined(CONFIG_SOFTMMU)
2507 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2508 watch_mem_write, NULL);
2509 #endif
2510 /* alloc dirty bits array */
2511 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2512 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2515 /* mem_read and mem_write are arrays of functions used to access the
2516 region as bytes (index 0), words (index 1) and double words (index
2517 2). All functions must be supplied. If io_index is positive, the
2518 corresponding io zone is modified. If it is zero (or negative), a new
2519 io zone is allocated. The return value can be used with
2520 cpu_register_physical_memory(). (-1) is returned on error. */
2521 int cpu_register_io_memory(int io_index,
2522 CPUReadMemoryFunc **mem_read,
2523 CPUWriteMemoryFunc **mem_write,
2524 void *opaque)
2526 int i;
2528 if (io_index <= 0) {
2529 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2530 return -1;
2531 io_index = io_mem_nb++;
2532 } else {
2533 if (io_index >= IO_MEM_NB_ENTRIES)
2534 return -1;
2537 for(i = 0;i < 3; i++) {
2538 io_mem_read[io_index][i] = mem_read[i];
2539 io_mem_write[io_index][i] = mem_write[i];
2541 io_mem_opaque[io_index] = opaque;
2542 return io_index << IO_MEM_SHIFT;
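/* Illustrative sketch (not part of the build): the usual device-side use of
   cpu_register_io_memory().  The mydev_* callbacks and the 0x1000-byte MMIO
   window at 0xe0000000 are made up for the example. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* hypothetical register read */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* hypothetical register write */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,      /* byte, word, long */
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void mydev_map(void *opaque)
{
    int iomemtype;

    /* io_index == 0 asks for a new I/O slot; the return value already has
       IO_MEM_SHIFT applied, so it can be passed straight to
       cpu_register_physical_memory() as a phys_offset. */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0xe0000000, 0x1000, iomemtype);
}
#endif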
2545 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2547 return io_mem_write[io_index >> IO_MEM_SHIFT];
2550 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2552 return io_mem_read[io_index >> IO_MEM_SHIFT];
2555 /* physical memory access (slow version, mainly for debug) */
2556 #if defined(CONFIG_USER_ONLY)
2557 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2558 int len, int is_write)
2560 int l, flags;
2561 target_ulong page;
2562 void * p;
2564 while (len > 0) {
2565 page = addr & TARGET_PAGE_MASK;
2566 l = (page + TARGET_PAGE_SIZE) - addr;
2567 if (l > len)
2568 l = len;
2569 flags = page_get_flags(page);
2570 if (!(flags & PAGE_VALID))
2571 return;
2572 if (is_write) {
2573 if (!(flags & PAGE_WRITE))
2574 return;
2575 /* XXX: this code should not depend on lock_user */
2576 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2577 /* FIXME - should this return an error rather than just fail? */
2578 return;
2579 memcpy(p, buf, len);
2580 unlock_user(p, addr, len);
2581 } else {
2582 if (!(flags & PAGE_READ))
2583 return;
2584 /* XXX: this code should not depend on lock_user */
2585 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2586 /* FIXME - should this return an error rather than just fail? */
2587 return;
2588 memcpy(buf, p, len);
2589 unlock_user(p, addr, 0);
2591 len -= l;
2592 buf += l;
2593 addr += l;
2597 #else
2598 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2599 int len, int is_write)
2601 int l, io_index;
2602 uint8_t *ptr;
2603 uint32_t val;
2604 target_phys_addr_t page;
2605 unsigned long pd;
2606 PhysPageDesc *p;
2608 while (len > 0) {
2609 page = addr & TARGET_PAGE_MASK;
2610 l = (page + TARGET_PAGE_SIZE) - addr;
2611 if (l > len)
2612 l = len;
2613 p = phys_page_find(page >> TARGET_PAGE_BITS);
2614 if (!p) {
2615 pd = IO_MEM_UNASSIGNED;
2616 } else {
2617 pd = p->phys_offset;
2620 if (is_write) {
2621 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2622 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2623 /* XXX: could force cpu_single_env to NULL to avoid
2624 potential bugs */
2625 if (l >= 4 && ((addr & 3) == 0)) {
2626 /* 32 bit write access */
2627 val = ldl_p(buf);
2628 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2629 l = 4;
2630 } else if (l >= 2 && ((addr & 1) == 0)) {
2631 /* 16 bit write access */
2632 val = lduw_p(buf);
2633 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2634 l = 2;
2635 } else {
2636 /* 8 bit write access */
2637 val = ldub_p(buf);
2638 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2639 l = 1;
2641 } else {
2642 unsigned long addr1;
2643 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2644 /* RAM case */
2645 ptr = phys_ram_base + addr1;
2646 memcpy(ptr, buf, l);
2647 if (!cpu_physical_memory_is_dirty(addr1)) {
2648 /* invalidate code */
2649 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2650 /* set dirty bit */
2651 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2652 (0xff & ~CODE_DIRTY_FLAG);
2654 #ifdef USE_KVM
2655 /* qemu doesn't execute guest code directly, but kvm does,
2656 therefore flush the instruction caches */
2657 flush_icache_range((unsigned long)ptr, ((unsigned long)ptr)+l);
2658 #endif
2660 } else {
2661 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2662 !(pd & IO_MEM_ROMD)) {
2663 /* I/O case */
2664 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2665 if (l >= 4 && ((addr & 3) == 0)) {
2666 /* 32 bit read access */
2667 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2668 stl_p(buf, val);
2669 l = 4;
2670 } else if (l >= 2 && ((addr & 1) == 0)) {
2671 /* 16 bit read access */
2672 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2673 stw_p(buf, val);
2674 l = 2;
2675 } else {
2676 /* 8 bit read access */
2677 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2678 stb_p(buf, val);
2679 l = 1;
2681 } else {
2682 /* RAM case */
2683 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2684 (addr & ~TARGET_PAGE_MASK);
2685 memcpy(buf, ptr, l);
2688 len -= l;
2689 buf += l;
2690 addr += l;
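/* Illustrative sketch (not part of the build): cpu_physical_memory_rw() is
   usually reached through the cpu_physical_memory_read()/_write() wrappers,
   e.g. by a device model doing DMA.  The buffer size and addresses are made
   up for the example. */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[4096];

    if (len > (int)sizeof(buf))
        len = sizeof(buf);
    cpu_physical_memory_read(src, buf, len);    /* is_write = 0 path */
    cpu_physical_memory_write(dst, buf, len);   /* is_write = 1 path */
}
#endif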
2694 /* used for ROM loading: can write to both RAM and ROM */
2695 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2696 const uint8_t *buf, int len)
2698 int l;
2699 uint8_t *ptr;
2700 target_phys_addr_t page;
2701 unsigned long pd;
2702 PhysPageDesc *p;
2704 while (len > 0) {
2705 page = addr & TARGET_PAGE_MASK;
2706 l = (page + TARGET_PAGE_SIZE) - addr;
2707 if (l > len)
2708 l = len;
2709 p = phys_page_find(page >> TARGET_PAGE_BITS);
2710 if (!p) {
2711 pd = IO_MEM_UNASSIGNED;
2712 } else {
2713 pd = p->phys_offset;
2716 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2717 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2718 !(pd & IO_MEM_ROMD)) {
2719 /* do nothing */
2720 } else {
2721 unsigned long addr1;
2722 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2723 /* ROM/RAM case */
2724 ptr = phys_ram_base + addr1;
2725 memcpy(ptr, buf, l);
2727 len -= l;
2728 buf += l;
2729 addr += l;
2734 /* warning: addr must be aligned */
2735 uint32_t ldl_phys(target_phys_addr_t addr)
2737 int io_index;
2738 uint8_t *ptr;
2739 uint32_t val;
2740 unsigned long pd;
2741 PhysPageDesc *p;
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p) {
2745 pd = IO_MEM_UNASSIGNED;
2746 } else {
2747 pd = p->phys_offset;
2750 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2751 !(pd & IO_MEM_ROMD)) {
2752 /* I/O case */
2753 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2754 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2755 } else {
2756 /* RAM case */
2757 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2758 (addr & ~TARGET_PAGE_MASK);
2759 val = ldl_p(ptr);
2761 return val;
2764 /* warning: addr must be aligned */
2765 uint64_t ldq_phys(target_phys_addr_t addr)
2767 int io_index;
2768 uint8_t *ptr;
2769 uint64_t val;
2770 unsigned long pd;
2771 PhysPageDesc *p;
2773 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2774 if (!p) {
2775 pd = IO_MEM_UNASSIGNED;
2776 } else {
2777 pd = p->phys_offset;
2780 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2781 !(pd & IO_MEM_ROMD)) {
2782 /* I/O case */
2783 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2784 #ifdef TARGET_WORDS_BIGENDIAN
2785 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2786 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2787 #else
2788 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2789 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2790 #endif
2791 } else {
2792 /* RAM case */
2793 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2794 (addr & ~TARGET_PAGE_MASK);
2795 val = ldq_p(ptr);
2797 return val;
2800 /* XXX: optimize */
2801 uint32_t ldub_phys(target_phys_addr_t addr)
2803 uint8_t val;
2804 cpu_physical_memory_read(addr, &val, 1);
2805 return val;
2808 /* XXX: optimize */
2809 uint32_t lduw_phys(target_phys_addr_t addr)
2811 uint16_t val;
2812 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2813 return tswap16(val);
2816 #ifdef __GNUC__
2817 #define likely(x) __builtin_expect(!!(x), 1)
2818 #define unlikely(x) __builtin_expect(!!(x), 0)
2819 #else
2820 #define likely(x) x
2821 #define unlikely(x) x
2822 #endif
2824 /* warning: addr must be aligned. The ram page is not marked as dirty
2825 and the code inside is not invalidated. It is useful if the dirty
2826 bits are used to track modified PTEs */
2827 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2829 int io_index;
2830 uint8_t *ptr;
2831 unsigned long pd;
2832 PhysPageDesc *p;
2834 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2835 if (!p) {
2836 pd = IO_MEM_UNASSIGNED;
2837 } else {
2838 pd = p->phys_offset;
2841 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2842 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2843 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2844 } else {
2845 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2846 ptr = phys_ram_base + addr1;
2847 stl_p(ptr, val);
2849 if (unlikely(in_migration)) {
2850 if (!cpu_physical_memory_is_dirty(addr1)) {
2851 /* invalidate code */
2852 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2853 /* set dirty bit */
2854 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2855 (0xff & ~CODE_DIRTY_FLAG);
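/* Illustrative sketch (not part of the build): the typical user of
   stl_phys_notdirty() is target MMU emulation updating accessed/dirty bits
   in a guest page table entry.  The PTE bit layout below (bit 5 = accessed,
   bit 6 = dirty) follows the x86 convention and is only an example. */
#if 0
static void example_set_pte_ad_bits(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                /* accessed */
    if (is_write)
        pte |= 0x40;            /* dirty */
    /* The notdirty variant skips dirty marking and code invalidation, so
       an A/D update made by the emulator itself is not mistaken for a
       guest modification of the page table. */
    stl_phys_notdirty(pte_addr, pte);
}
#endif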
2861 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2863 int io_index;
2864 uint8_t *ptr;
2865 unsigned long pd;
2866 PhysPageDesc *p;
2868 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2869 if (!p) {
2870 pd = IO_MEM_UNASSIGNED;
2871 } else {
2872 pd = p->phys_offset;
2875 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2876 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2877 #ifdef TARGET_WORDS_BIGENDIAN
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2879 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2880 #else
2881 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2882 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2883 #endif
2884 } else {
2885 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2886 (addr & ~TARGET_PAGE_MASK);
2887 stq_p(ptr, val);
2891 /* warning: addr must be aligned */
2892 void stl_phys(target_phys_addr_t addr, uint32_t val)
2894 int io_index;
2895 uint8_t *ptr;
2896 unsigned long pd;
2897 PhysPageDesc *p;
2899 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2900 if (!p) {
2901 pd = IO_MEM_UNASSIGNED;
2902 } else {
2903 pd = p->phys_offset;
2906 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2907 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2908 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2909 } else {
2910 unsigned long addr1;
2911 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2912 /* RAM case */
2913 ptr = phys_ram_base + addr1;
2914 stl_p(ptr, val);
2915 if (!cpu_physical_memory_is_dirty(addr1)) {
2916 /* invalidate code */
2917 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2918 /* set dirty bit */
2919 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2920 (0xff & ~CODE_DIRTY_FLAG);
2925 /* XXX: optimize */
2926 void stb_phys(target_phys_addr_t addr, uint32_t val)
2928 uint8_t v = val;
2929 cpu_physical_memory_write(addr, &v, 1);
2932 /* XXX: optimize */
2933 void stw_phys(target_phys_addr_t addr, uint32_t val)
2935 uint16_t v = tswap16(val);
2936 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2939 /* XXX: optimize */
2940 void stq_phys(target_phys_addr_t addr, uint64_t val)
2942 val = tswap64(val);
2943 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2946 #endif
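/* Illustrative sketch (not part of the build): the fixed-size accessors
   above (ldl_phys, ldq_phys, stl_phys, ...) are the convenient form for
   device models that read or write small guest-physical structures such as
   a DMA descriptor.  The descriptor layout is made up for the example. */
#if 0
struct example_dma_desc {
    uint32_t buf_addr;
    uint32_t buf_len;
};

static void example_read_desc(target_phys_addr_t desc_addr,
                              struct example_dma_desc *d)
{
    d->buf_addr = ldl_phys(desc_addr);
    d->buf_len  = ldl_phys(desc_addr + 4);
}
#endif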
2948 /* virtual memory access for debug */
2949 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2950 uint8_t *buf, int len, int is_write)
2952 int l;
2953 target_phys_addr_t phys_addr;
2954 target_ulong page;
2956 while (len > 0) {
2957 page = addr & TARGET_PAGE_MASK;
2958 phys_addr = cpu_get_phys_page_debug(env, page);
2959 /* if no physical page mapped, return an error */
2960 if (phys_addr == -1)
2961 return -1;
2962 l = (page + TARGET_PAGE_SIZE) - addr;
2963 if (l > len)
2964 l = len;
2965 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2966 buf, l, is_write);
2967 len -= l;
2968 buf += l;
2969 addr += l;
2971 return 0;
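/* Illustrative sketch (not part of the build): cpu_memory_rw_debug() is the
   entry point used by debug front ends such as the gdb stub to access guest
   virtual memory; it translates page by page via cpu_get_phys_page_debug()
   and then falls through to cpu_physical_memory_rw().  The wrapper below is
   hypothetical. */
#if 0
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif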
2974 void dump_exec_info(FILE *f,
2975 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2977 int i, target_code_size, max_target_code_size;
2978 int direct_jmp_count, direct_jmp2_count, cross_page;
2979 TranslationBlock *tb;
2981 target_code_size = 0;
2982 max_target_code_size = 0;
2983 cross_page = 0;
2984 direct_jmp_count = 0;
2985 direct_jmp2_count = 0;
2986 for(i = 0; i < nb_tbs; i++) {
2987 tb = &tbs[i];
2988 target_code_size += tb->size;
2989 if (tb->size > max_target_code_size)
2990 max_target_code_size = tb->size;
2991 if (tb->page_addr[1] != -1)
2992 cross_page++;
2993 if (tb->tb_next_offset[0] != 0xffff) {
2994 direct_jmp_count++;
2995 if (tb->tb_next_offset[1] != 0xffff) {
2996 direct_jmp2_count++;
3000 /* XXX: avoid using doubles? */
3001 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3002 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3003 nb_tbs ? target_code_size / nb_tbs : 0,
3004 max_target_code_size);
3005 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3006 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3007 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3008 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3009 cross_page,
3010 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3011 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3012 direct_jmp_count,
3013 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3014 direct_jmp2_count,
3015 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3016 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3017 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3018 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
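/* Illustrative sketch (not part of the build): dump_exec_info() takes any
   fprintf-compatible callback, so the statistics can be sent to a plain
   stdio stream just as easily as to a monitor output routine. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif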
3021 #if !defined(CONFIG_USER_ONLY)
3023 #define MMUSUFFIX _cmmu
3024 #define GETPC() NULL
3025 #define env cpu_single_env
3026 #define SOFTMMU_CODE_ACCESS
3028 #define SHIFT 0
3029 #include "softmmu_template.h"
3031 #define SHIFT 1
3032 #include "softmmu_template.h"
3034 #define SHIFT 2
3035 #include "softmmu_template.h"
3037 #define SHIFT 3
3038 #include "softmmu_template.h"
3040 #undef env
3042 #endif