1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
39 #if !defined(NO_CPU_EMULATION)
40 #include "tcg-target.h"
41 #endif
43 #include "qemu-kvm.h"
44 #if defined(CONFIG_USER_ONLY)
45 #include <qemu.h>
46 #endif
48 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_FLUSH
50 //#define DEBUG_TLB
51 //#define DEBUG_UNASSIGNED
53 /* make various TB consistency checks */
54 //#define DEBUG_TB_CHECK
55 //#define DEBUG_TLB_CHECK
57 //#define DEBUG_IOPORT
58 //#define DEBUG_SUBPAGE
60 #if !defined(CONFIG_USER_ONLY)
61 /* TB consistency checks only implemented for usermode emulation. */
62 #undef DEBUG_TB_CHECK
63 #endif
65 /* threshold to flush the translated code buffer */
66 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #define MMAP_AREA_START 0x00000000
71 #define MMAP_AREA_END 0xa8000000
73 #if defined(TARGET_SPARC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 41
75 #elif defined(TARGET_SPARC)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77 #elif defined(TARGET_ALPHA)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #define TARGET_VIRT_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_PPC64)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif USE_KQEMU
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #elif TARGET_X86_64
86 #define TARGET_PHYS_ADDR_SPACE_BITS 42
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 #define TARGET_PHYS_ADDR_SPACE_BITS 32
91 #endif
93 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
94 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
95 int nb_tbs;
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
99 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
100 uint8_t *code_gen_ptr;
102 ram_addr_t phys_ram_size;
103 int phys_ram_fd;
104 uint8_t *phys_ram_base;
105 uint8_t *phys_ram_dirty;
106 uint8_t *bios_mem;
107 static int in_migration;
108 static ram_addr_t phys_ram_alloc_offset = 0;
110 CPUState *first_cpu;
111 /* current CPU in the current thread. It is only valid inside
112 cpu_exec() */
113 CPUState *cpu_single_env;
115 typedef struct PageDesc {
116 /* list of TBs intersecting this ram page */
117 TranslationBlock *first_tb;
118 /* in order to optimize self modifying code, we count the number
119 of code write invalidations on a given page; past a threshold we build a bitmap */
120 unsigned int code_write_count;
121 uint8_t *code_bitmap;
122 #if defined(CONFIG_USER_ONLY)
123 unsigned long flags;
124 #endif
125 } PageDesc;
127 typedef struct PhysPageDesc {
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 ram_addr_t phys_offset;
130 } PhysPageDesc;
132 #define L2_BITS 10
133 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
134 /* XXX: this is a temporary hack for alpha target.
135 * In the future, this is to be replaced by a multi-level table
136 * to actually be able to handle the complete 64-bit address space.
137 */
138 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
139 #else
140 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
141 #endif
143 #define L1_SIZE (1 << L1_BITS)
144 #define L2_SIZE (1 << L2_BITS)
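/* Illustrative note, not part of the original file: how an address indexes
 * the two-level page table, assuming TARGET_PAGE_BITS is 12 (4 KiB target
 * pages; the real value is target dependent).  With L2_BITS = 10 a 32-bit
 * address splits into 10 L1 bits, 10 L2 bits and 12 offset bits:
 *
 *   l1_idx = addr >> (TARGET_PAGE_BITS + L2_BITS);        // top 10 bits
 *   l2_idx = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1);  // next 10 bits
 *   PageDesc *pd = l1_map[l1_idx] ? &l1_map[l1_idx][l2_idx] : NULL;
 *
 * page_find()/page_find_alloc() below do the same thing starting from the
 * page index (addr >> TARGET_PAGE_BITS). */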
146 static void io_mem_init(void);
148 unsigned long qemu_real_host_page_size;
149 unsigned long qemu_host_page_bits;
150 unsigned long qemu_host_page_size;
151 unsigned long qemu_host_page_mask;
153 /* XXX: for system emulation, it could just be an array */
154 static PageDesc *l1_map[L1_SIZE];
155 PhysPageDesc **l1_phys_map;
157 /* io memory support */
158 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
159 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
160 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
161 char io_mem_used[IO_MEM_NB_ENTRIES];
162 #if defined(CONFIG_SOFTMMU)
163 static int io_mem_watch;
164 #endif
166 /* log support */
167 char *logfilename = "/tmp/qemu.log";
168 FILE *logfile;
169 int loglevel;
170 static int log_append = 0;
172 /* statistics */
173 static int tlb_flush_count;
174 static int tb_flush_count;
175 static int tb_phys_invalidate_count;
177 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
178 typedef struct subpage_t {
179 target_phys_addr_t base;
180 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
181 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
182 void *opaque[TARGET_PAGE_SIZE][2][4];
183 } subpage_t;
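/* Illustrative only: SUBPAGE_IDX() keeps the offset of an access inside its
 * target page, which is what indexes the per-byte tables of a subpage_t.
 * Assuming 4 KiB target pages, SUBPAGE_IDX(0x1234) == 0x234 and
 * SUBPAGE_IDX(0xa000) == 0x000. */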
185 static void page_init(void)
187 /* NOTE: we can always suppose that qemu_host_page_size >=
188 TARGET_PAGE_SIZE */
189 #ifdef _WIN32
191 SYSTEM_INFO system_info;
192 DWORD old_protect;
194 GetSystemInfo(&system_info);
195 qemu_real_host_page_size = system_info.dwPageSize;
197 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
198 PAGE_EXECUTE_READWRITE, &old_protect);
200 #else
201 qemu_real_host_page_size = getpagesize();
203 unsigned long start, end;
205 start = (unsigned long)code_gen_buffer;
206 start &= ~(qemu_real_host_page_size - 1);
208 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
209 end += qemu_real_host_page_size - 1;
210 end &= ~(qemu_real_host_page_size - 1);
212 mprotect((void *)start, end - start,
213 PROT_READ | PROT_WRITE | PROT_EXEC);
215 #endif
217 if (qemu_host_page_size == 0)
218 qemu_host_page_size = qemu_real_host_page_size;
219 if (qemu_host_page_size < TARGET_PAGE_SIZE)
220 qemu_host_page_size = TARGET_PAGE_SIZE;
221 qemu_host_page_bits = 0;
222 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
223 qemu_host_page_bits++;
224 qemu_host_page_mask = ~(qemu_host_page_size - 1);
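/* Worked example (illustrative, assuming the common 4 KiB host page):
 * qemu_host_page_size = 0x1000, the loop above yields
 * qemu_host_page_bits = 12 and qemu_host_page_mask = ~0xfffUL, i.e. the
 * mask that rounds an address down to its host page boundary. */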
225 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
226 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
228 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
230 long long startaddr, endaddr;
231 FILE *f;
232 int n;
234 f = fopen("/proc/self/maps", "r");
235 if (f) {
236 do {
237 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
238 if (n == 2) {
239 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
240 TARGET_PAGE_ALIGN(endaddr),
241 PAGE_RESERVED);
243 } while (!feof(f));
244 fclose(f);
247 #endif
250 static inline PageDesc *page_find_alloc(unsigned int index)
252 PageDesc **lp, *p;
254 lp = &l1_map[index >> L2_BITS];
255 p = *lp;
256 if (!p) {
257 /* allocate if not found */
258 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
259 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
260 *lp = p;
262 return p + (index & (L2_SIZE - 1));
265 static inline PageDesc *page_find(unsigned int index)
267 PageDesc *p;
269 p = l1_map[index >> L2_BITS];
270 if (!p)
271 return 0;
272 return p + (index & (L2_SIZE - 1));
275 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
277 void **lp, **p;
278 PhysPageDesc *pd;
280 p = (void **)l1_phys_map;
281 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
283 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
284 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
285 #endif
286 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
287 p = *lp;
288 if (!p) {
289 /* allocate if not found */
290 if (!alloc)
291 return NULL;
292 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
293 memset(p, 0, sizeof(void *) * L1_SIZE);
294 *lp = p;
296 #endif
297 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
298 pd = *lp;
299 if (!pd) {
300 int i;
301 /* allocate if not found */
302 if (!alloc)
303 return NULL;
304 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
305 *lp = pd;
306 for (i = 0; i < L2_SIZE; i++)
307 pd[i].phys_offset = IO_MEM_UNASSIGNED;
309 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
312 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
314 return phys_page_find_alloc(index, 0);
317 #if !defined(CONFIG_USER_ONLY)
318 static void tlb_protect_code(ram_addr_t ram_addr);
319 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
320 target_ulong vaddr);
321 #endif
323 void cpu_exec_init(CPUState *env)
325 CPUState **penv;
326 int cpu_index;
328 if (!code_gen_ptr) {
329 cpu_gen_init();
330 code_gen_ptr = code_gen_buffer;
331 page_init();
332 io_mem_init();
334 env->next_cpu = NULL;
335 penv = &first_cpu;
336 cpu_index = 0;
337 while (*penv != NULL) {
338 penv = (CPUState **)&(*penv)->next_cpu;
339 cpu_index++;
341 env->cpu_index = cpu_index;
342 env->nb_watchpoints = 0;
343 #ifdef _WIN32
344 env->thread_id = GetCurrentProcessId();
345 #else
346 env->thread_id = getpid();
347 #endif
348 *penv = env;
351 static inline void invalidate_page_bitmap(PageDesc *p)
353 if (p->code_bitmap) {
354 qemu_free(p->code_bitmap);
355 p->code_bitmap = NULL;
357 p->code_write_count = 0;
360 /* set to NULL all the 'first_tb' fields in all PageDescs */
361 static void page_flush_tb(void)
363 int i, j;
364 PageDesc *p;
366 for(i = 0; i < L1_SIZE; i++) {
367 p = l1_map[i];
368 if (p) {
369 for(j = 0; j < L2_SIZE; j++) {
370 p->first_tb = NULL;
371 invalidate_page_bitmap(p);
372 p++;
378 /* flush all the translation blocks */
379 /* XXX: tb_flush is currently not thread safe */
380 void tb_flush(CPUState *env1)
382 CPUState *env;
383 #if defined(DEBUG_FLUSH)
384 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
385 (unsigned long)(code_gen_ptr - code_gen_buffer),
386 nb_tbs, nb_tbs > 0 ?
387 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
388 #endif
389 nb_tbs = 0;
391 for(env = first_cpu; env != NULL; env = env->next_cpu) {
392 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
395 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
396 page_flush_tb();
398 code_gen_ptr = code_gen_buffer;
399 /* XXX: flush processor icache at this point if cache flush is
400 expensive */
401 tb_flush_count++;
404 #ifdef DEBUG_TB_CHECK
406 static void tb_invalidate_check(target_ulong address)
408 TranslationBlock *tb;
409 int i;
410 address &= TARGET_PAGE_MASK;
411 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
412 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
413 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
414 address >= tb->pc + tb->size)) {
415 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
416 address, (long)tb->pc, tb->size);
422 /* verify that all the pages have correct rights for code */
423 static void tb_page_check(void)
425 TranslationBlock *tb;
426 int i, flags1, flags2;
428 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
429 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
430 flags1 = page_get_flags(tb->pc);
431 flags2 = page_get_flags(tb->pc + tb->size - 1);
432 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
433 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
434 (long)tb->pc, tb->size, flags1, flags2);
440 void tb_jmp_check(TranslationBlock *tb)
442 TranslationBlock *tb1;
443 unsigned int n1;
445 /* suppress any remaining jumps to this TB */
446 tb1 = tb->jmp_first;
447 for(;;) {
448 n1 = (long)tb1 & 3;
449 tb1 = (TranslationBlock *)((long)tb1 & ~3);
450 if (n1 == 2)
451 break;
452 tb1 = tb1->jmp_next[n1];
454 /* check end of list */
455 if (tb1 != tb) {
456 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
460 #endif
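/* Note added for clarity (not in the original file): the TB lists used below
 * store a TranslationBlock pointer with a small tag packed into its two low
 * bits, which are free because TBs are at least 4-byte aligned.  In the
 * page lists the tag selects which of the (at most two) pages of the TB the
 * link belongs to; in the jump lists the value 2 marks the list head.  A
 * typical walk therefore looks like:
 *
 *   n1  = (long)tb1 & 3;                          // extract the tag
 *   tb1 = (TranslationBlock *)((long)tb1 & ~3);   // recover the pointer
 */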
462 /* invalidate one TB */
463 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
464 int next_offset)
466 TranslationBlock *tb1;
467 for(;;) {
468 tb1 = *ptb;
469 if (tb1 == tb) {
470 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
471 break;
473 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
477 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
479 TranslationBlock *tb1;
480 unsigned int n1;
482 for(;;) {
483 tb1 = *ptb;
484 n1 = (long)tb1 & 3;
485 tb1 = (TranslationBlock *)((long)tb1 & ~3);
486 if (tb1 == tb) {
487 *ptb = tb1->page_next[n1];
488 break;
490 ptb = &tb1->page_next[n1];
494 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
496 TranslationBlock *tb1, **ptb;
497 unsigned int n1;
499 ptb = &tb->jmp_next[n];
500 tb1 = *ptb;
501 if (tb1) {
502 /* find tb(n) in circular list */
503 for(;;) {
504 tb1 = *ptb;
505 n1 = (long)tb1 & 3;
506 tb1 = (TranslationBlock *)((long)tb1 & ~3);
507 if (n1 == n && tb1 == tb)
508 break;
509 if (n1 == 2) {
510 ptb = &tb1->jmp_first;
511 } else {
512 ptb = &tb1->jmp_next[n1];
515 /* now we can suppress tb(n) from the list */
516 *ptb = tb->jmp_next[n];
518 tb->jmp_next[n] = NULL;
522 /* reset the jump entry 'n' of a TB so that it is not chained to
523 another TB */
524 static inline void tb_reset_jump(TranslationBlock *tb, int n)
526 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
529 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
531 CPUState *env;
532 PageDesc *p;
533 unsigned int h, n1;
534 target_ulong phys_pc;
535 TranslationBlock *tb1, *tb2;
537 /* remove the TB from the hash list */
538 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
539 h = tb_phys_hash_func(phys_pc);
540 tb_remove(&tb_phys_hash[h], tb,
541 offsetof(TranslationBlock, phys_hash_next));
543 /* remove the TB from the page list */
544 if (tb->page_addr[0] != page_addr) {
545 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
546 tb_page_remove(&p->first_tb, tb);
547 invalidate_page_bitmap(p);
549 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
550 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
551 tb_page_remove(&p->first_tb, tb);
552 invalidate_page_bitmap(p);
555 tb_invalidated_flag = 1;
557 /* remove the TB from the hash list */
558 h = tb_jmp_cache_hash_func(tb->pc);
559 for(env = first_cpu; env != NULL; env = env->next_cpu) {
560 if (env->tb_jmp_cache[h] == tb)
561 env->tb_jmp_cache[h] = NULL;
564 /* suppress this TB from the two jump lists */
565 tb_jmp_remove(tb, 0);
566 tb_jmp_remove(tb, 1);
568 /* suppress any remaining jumps to this TB */
569 tb1 = tb->jmp_first;
570 for(;;) {
571 n1 = (long)tb1 & 3;
572 if (n1 == 2)
573 break;
574 tb1 = (TranslationBlock *)((long)tb1 & ~3);
575 tb2 = tb1->jmp_next[n1];
576 tb_reset_jump(tb1, n1);
577 tb1->jmp_next[n1] = NULL;
578 tb1 = tb2;
580 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
582 tb_phys_invalidate_count++;
585 static inline void set_bits(uint8_t *tab, int start, int len)
587 int end, mask, end1;
589 end = start + len;
590 tab += start >> 3;
591 mask = 0xff << (start & 7);
592 if ((start & ~7) == (end & ~7)) {
593 if (start < end) {
594 mask &= ~(0xff << (end & 7));
595 *tab |= mask;
597 } else {
598 *tab++ |= mask;
599 start = (start + 8) & ~7;
600 end1 = end & ~7;
601 while (start < end1) {
602 *tab++ = 0xff;
603 start += 8;
605 if (start < end) {
606 mask = ~(0xff << (end & 7));
607 *tab |= mask;
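/* Example of set_bits() (illustrative only): with start = 5 and len = 10,
 * bits 5..14 of the bitmap are set, i.e. tab[0] |= 0xe0 and tab[1] |= 0x7f.
 * build_page_bitmap() below feeds it TB byte offsets within the page. */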
612 static void build_page_bitmap(PageDesc *p)
614 int n, tb_start, tb_end;
615 TranslationBlock *tb;
617 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
618 if (!p->code_bitmap)
619 return;
620 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
622 tb = p->first_tb;
623 while (tb != NULL) {
624 n = (long)tb & 3;
625 tb = (TranslationBlock *)((long)tb & ~3);
626 /* NOTE: this is subtle as a TB may span two physical pages */
627 if (n == 0) {
628 /* NOTE: tb_end may be after the end of the page, but
629 it is not a problem */
630 tb_start = tb->pc & ~TARGET_PAGE_MASK;
631 tb_end = tb_start + tb->size;
632 if (tb_end > TARGET_PAGE_SIZE)
633 tb_end = TARGET_PAGE_SIZE;
634 } else {
635 tb_start = 0;
636 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
638 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
639 tb = tb->page_next[n];
643 #ifdef TARGET_HAS_PRECISE_SMC
645 static void tb_gen_code(CPUState *env,
646 target_ulong pc, target_ulong cs_base, int flags,
647 int cflags)
649 TranslationBlock *tb;
650 uint8_t *tc_ptr;
651 target_ulong phys_pc, phys_page2, virt_page2;
652 int code_gen_size;
654 phys_pc = get_phys_addr_code(env, pc);
655 tb = tb_alloc(pc);
656 if (!tb) {
657 /* flush must be done */
658 tb_flush(env);
659 /* cannot fail at this point */
660 tb = tb_alloc(pc);
662 tc_ptr = code_gen_ptr;
663 tb->tc_ptr = tc_ptr;
664 tb->cs_base = cs_base;
665 tb->flags = flags;
666 tb->cflags = cflags;
667 cpu_gen_code(env, tb, &code_gen_size);
668 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
670 /* check next page if needed */
671 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
672 phys_page2 = -1;
673 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
674 phys_page2 = get_phys_addr_code(env, virt_page2);
676 tb_link_phys(tb, phys_pc, phys_page2);
678 #endif
680 /* invalidate all TBs which intersect with the target physical page
681 starting in range [start;end[. NOTE: start and end must refer to
682 the same physical page. 'is_cpu_write_access' should be true if called
683 from a real cpu write access: the virtual CPU will exit the current
684 TB if code is modified inside this TB. */
685 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
686 int is_cpu_write_access)
688 int n, current_tb_modified, current_tb_not_found, current_flags;
689 CPUState *env = cpu_single_env;
690 PageDesc *p;
691 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
692 target_ulong tb_start, tb_end;
693 target_ulong current_pc, current_cs_base;
695 p = page_find(start >> TARGET_PAGE_BITS);
696 if (!p)
697 return;
698 if (!p->code_bitmap &&
699 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
700 is_cpu_write_access) {
701 /* build code bitmap */
702 build_page_bitmap(p);
705 /* we remove all the TBs in the range [start, end[ */
706 /* XXX: see if in some cases it could be faster to invalidate all the code */
707 current_tb_not_found = is_cpu_write_access;
708 current_tb_modified = 0;
709 current_tb = NULL; /* avoid warning */
710 current_pc = 0; /* avoid warning */
711 current_cs_base = 0; /* avoid warning */
712 current_flags = 0; /* avoid warning */
713 tb = p->first_tb;
714 while (tb != NULL) {
715 n = (long)tb & 3;
716 tb = (TranslationBlock *)((long)tb & ~3);
717 tb_next = tb->page_next[n];
718 /* NOTE: this is subtle as a TB may span two physical pages */
719 if (n == 0) {
720 /* NOTE: tb_end may be after the end of the page, but
721 it is not a problem */
722 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
723 tb_end = tb_start + tb->size;
724 } else {
725 tb_start = tb->page_addr[1];
726 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
728 if (!(tb_end <= start || tb_start >= end)) {
729 #ifdef TARGET_HAS_PRECISE_SMC
730 if (current_tb_not_found) {
731 current_tb_not_found = 0;
732 current_tb = NULL;
733 if (env->mem_write_pc) {
734 /* now we have a real cpu fault */
735 current_tb = tb_find_pc(env->mem_write_pc);
738 if (current_tb == tb &&
739 !(current_tb->cflags & CF_SINGLE_INSN)) {
740 /* If we are modifying the current TB, we must stop
741 its execution. We could be more precise by checking
742 that the modification is after the current PC, but it
743 would require a specialized function to partially
744 restore the CPU state */
746 current_tb_modified = 1;
747 cpu_restore_state(current_tb, env,
748 env->mem_write_pc, NULL);
749 #if defined(TARGET_I386)
750 current_flags = env->hflags;
751 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
752 current_cs_base = (target_ulong)env->segs[R_CS].base;
753 current_pc = current_cs_base + env->eip;
754 #else
755 #error unsupported CPU
756 #endif
758 #endif /* TARGET_HAS_PRECISE_SMC */
759 /* we need to do that to handle the case where a signal
760 occurs while doing tb_phys_invalidate() */
761 saved_tb = NULL;
762 if (env) {
763 saved_tb = env->current_tb;
764 env->current_tb = NULL;
766 tb_phys_invalidate(tb, -1);
767 if (env) {
768 env->current_tb = saved_tb;
769 if (env->interrupt_request && env->current_tb)
770 cpu_interrupt(env, env->interrupt_request);
773 tb = tb_next;
775 #if !defined(CONFIG_USER_ONLY)
776 /* if no code remaining, no need to continue to use slow writes */
777 if (!p->first_tb) {
778 invalidate_page_bitmap(p);
779 if (is_cpu_write_access) {
780 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
783 #endif
784 #ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb_modified) {
786 /* we generate a block containing just the instruction
787 modifying the memory. It will ensure that it cannot modify
788 itself */
789 env->current_tb = NULL;
790 tb_gen_code(env, current_pc, current_cs_base, current_flags,
791 CF_SINGLE_INSN);
792 cpu_resume_from_signal(env, NULL);
794 #endif
797 /* len must be <= 8 and start must be a multiple of len */
798 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
800 PageDesc *p;
801 int offset, b;
802 #if 0
803 if (1) {
804 if (loglevel) {
805 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
806 cpu_single_env->mem_write_vaddr, len,
807 cpu_single_env->eip,
808 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
811 #endif
812 p = page_find(start >> TARGET_PAGE_BITS);
813 if (!p)
814 return;
815 if (p->code_bitmap) {
816 offset = start & ~TARGET_PAGE_MASK;
817 b = p->code_bitmap[offset >> 3] >> (offset & 7);
818 if (b & ((1 << len) - 1))
819 goto do_invalidate;
820 } else {
821 do_invalidate:
822 tb_invalidate_phys_page_range(start, start + len, 1);
826 #if !defined(CONFIG_SOFTMMU)
827 static void tb_invalidate_phys_page(target_ulong addr,
828 unsigned long pc, void *puc)
830 int n, current_flags, current_tb_modified;
831 target_ulong current_pc, current_cs_base;
832 PageDesc *p;
833 TranslationBlock *tb, *current_tb;
834 #ifdef TARGET_HAS_PRECISE_SMC
835 CPUState *env = cpu_single_env;
836 #endif
838 addr &= TARGET_PAGE_MASK;
839 p = page_find(addr >> TARGET_PAGE_BITS);
840 if (!p)
841 return;
842 tb = p->first_tb;
843 current_tb_modified = 0;
844 current_tb = NULL;
845 current_pc = 0; /* avoid warning */
846 current_cs_base = 0; /* avoid warning */
847 current_flags = 0; /* avoid warning */
848 #ifdef TARGET_HAS_PRECISE_SMC
849 if (tb && pc != 0) {
850 current_tb = tb_find_pc(pc);
852 #endif
853 while (tb != NULL) {
854 n = (long)tb & 3;
855 tb = (TranslationBlock *)((long)tb & ~3);
856 #ifdef TARGET_HAS_PRECISE_SMC
857 if (current_tb == tb &&
858 !(current_tb->cflags & CF_SINGLE_INSN)) {
859 /* If we are modifying the current TB, we must stop
860 its execution. We could be more precise by checking
861 that the modification is after the current PC, but it
862 would require a specialized function to partially
863 restore the CPU state */
865 current_tb_modified = 1;
866 cpu_restore_state(current_tb, env, pc, puc);
867 #if defined(TARGET_I386)
868 current_flags = env->hflags;
869 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
870 current_cs_base = (target_ulong)env->segs[R_CS].base;
871 current_pc = current_cs_base + env->eip;
872 #else
873 #error unsupported CPU
874 #endif
876 #endif /* TARGET_HAS_PRECISE_SMC */
877 tb_phys_invalidate(tb, addr);
878 tb = tb->page_next[n];
880 p->first_tb = NULL;
881 #ifdef TARGET_HAS_PRECISE_SMC
882 if (current_tb_modified) {
883 /* we generate a block containing just the instruction
884 modifying the memory. It will ensure that it cannot modify
885 itself */
886 env->current_tb = NULL;
887 tb_gen_code(env, current_pc, current_cs_base, current_flags,
888 CF_SINGLE_INSN);
889 cpu_resume_from_signal(env, puc);
891 #endif
893 #endif
895 /* add the tb to the target page and protect it if necessary */
896 static inline void tb_alloc_page(TranslationBlock *tb,
897 unsigned int n, target_ulong page_addr)
899 PageDesc *p;
900 TranslationBlock *last_first_tb;
902 tb->page_addr[n] = page_addr;
903 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
904 tb->page_next[n] = p->first_tb;
905 last_first_tb = p->first_tb;
906 p->first_tb = (TranslationBlock *)((long)tb | n);
907 invalidate_page_bitmap(p);
909 #if defined(TARGET_HAS_SMC) || 1
911 #if defined(CONFIG_USER_ONLY)
912 if (p->flags & PAGE_WRITE) {
913 target_ulong addr;
914 PageDesc *p2;
915 int prot;
917 /* force the host page as non writable (writes will have a
918 page fault + mprotect overhead) */
919 page_addr &= qemu_host_page_mask;
920 prot = 0;
921 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
922 addr += TARGET_PAGE_SIZE) {
924 p2 = page_find (addr >> TARGET_PAGE_BITS);
925 if (!p2)
926 continue;
927 prot |= p2->flags;
928 p2->flags &= ~PAGE_WRITE;
929 page_get_flags(addr);
931 mprotect(g2h(page_addr), qemu_host_page_size,
932 (prot & PAGE_BITS) & ~PAGE_WRITE);
933 #ifdef DEBUG_TB_INVALIDATE
934 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
935 page_addr);
936 #endif
938 #else
939 /* if some code is already present, then the pages are already
940 protected. So we handle the case where only the first TB is
941 allocated in a physical page */
942 if (!last_first_tb) {
943 tlb_protect_code(page_addr);
945 #endif
947 #endif /* TARGET_HAS_SMC */
950 /* Allocate a new translation block. Flush the translation buffer if
951 too many translation blocks or too much generated code. */
952 TranslationBlock *tb_alloc(target_ulong pc)
954 TranslationBlock *tb;
956 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
957 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
958 return NULL;
959 tb = &tbs[nb_tbs++];
960 tb->pc = pc;
961 tb->cflags = 0;
962 return tb;
965 /* add a new TB and link it to the physical page tables. phys_page2 is
966 (-1) to indicate that only one page contains the TB. */
967 void tb_link_phys(TranslationBlock *tb,
968 target_ulong phys_pc, target_ulong phys_page2)
970 unsigned int h;
971 TranslationBlock **ptb;
973 /* add in the physical hash table */
974 h = tb_phys_hash_func(phys_pc);
975 ptb = &tb_phys_hash[h];
976 tb->phys_hash_next = *ptb;
977 *ptb = tb;
979 /* add in the page list */
980 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
981 if (phys_page2 != -1)
982 tb_alloc_page(tb, 1, phys_page2);
983 else
984 tb->page_addr[1] = -1;
986 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
987 tb->jmp_next[0] = NULL;
988 tb->jmp_next[1] = NULL;
990 /* init original jump addresses */
991 if (tb->tb_next_offset[0] != 0xffff)
992 tb_reset_jump(tb, 0);
993 if (tb->tb_next_offset[1] != 0xffff)
994 tb_reset_jump(tb, 1);
996 #ifdef DEBUG_TB_CHECK
997 tb_page_check();
998 #endif
1001 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1002 tb[1].tc_ptr. Return NULL if not found */
1003 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1005 int m_min, m_max, m;
1006 unsigned long v;
1007 TranslationBlock *tb;
1009 if (nb_tbs <= 0)
1010 return NULL;
1011 if (tc_ptr < (unsigned long)code_gen_buffer ||
1012 tc_ptr >= (unsigned long)code_gen_ptr)
1013 return NULL;
1014 /* binary search (cf Knuth) */
1015 m_min = 0;
1016 m_max = nb_tbs - 1;
1017 while (m_min <= m_max) {
1018 m = (m_min + m_max) >> 1;
1019 tb = &tbs[m];
1020 v = (unsigned long)tb->tc_ptr;
1021 if (v == tc_ptr)
1022 return tb;
1023 else if (tc_ptr < v) {
1024 m_max = m - 1;
1025 } else {
1026 m_min = m + 1;
1029 return &tbs[m_max];
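/* Illustrative: TBs are handed out in increasing code_gen_ptr order, so the
 * tc_ptr values in tbs[] are sorted and the binary search above can return
 * tbs[m_max], the last block whose tc_ptr is <= tc_ptr.  Typical use is
 * mapping a host PC taken in a signal handler back to its TB (host_pc is a
 * hypothetical value here):
 *
 *   TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
 */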
1032 static void tb_reset_jump_recursive(TranslationBlock *tb);
1034 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1036 TranslationBlock *tb1, *tb_next, **ptb;
1037 unsigned int n1;
1039 tb1 = tb->jmp_next[n];
1040 if (tb1 != NULL) {
1041 /* find head of list */
1042 for(;;) {
1043 n1 = (long)tb1 & 3;
1044 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1045 if (n1 == 2)
1046 break;
1047 tb1 = tb1->jmp_next[n1];
1049 /* we are now sure that tb jumps to tb1 */
1050 tb_next = tb1;
1052 /* remove tb from the jmp_first list */
1053 ptb = &tb_next->jmp_first;
1054 for(;;) {
1055 tb1 = *ptb;
1056 n1 = (long)tb1 & 3;
1057 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1058 if (n1 == n && tb1 == tb)
1059 break;
1060 ptb = &tb1->jmp_next[n1];
1062 *ptb = tb->jmp_next[n];
1063 tb->jmp_next[n] = NULL;
1065 /* suppress the jump to next tb in generated code */
1066 tb_reset_jump(tb, n);
1068 /* suppress jumps in the tb on which we could have jumped */
1069 tb_reset_jump_recursive(tb_next);
1073 static void tb_reset_jump_recursive(TranslationBlock *tb)
1075 tb_reset_jump_recursive2(tb, 0);
1076 tb_reset_jump_recursive2(tb, 1);
1079 #if defined(TARGET_HAS_ICE)
1080 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1082 target_phys_addr_t addr;
1083 target_ulong pd;
1084 ram_addr_t ram_addr;
1085 PhysPageDesc *p;
1087 addr = cpu_get_phys_page_debug(env, pc);
1088 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1089 if (!p) {
1090 pd = IO_MEM_UNASSIGNED;
1091 } else {
1092 pd = p->phys_offset;
1094 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1095 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1097 #endif
1099 /* Add a watchpoint. */
1100 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1102 int i;
1104 for (i = 0; i < env->nb_watchpoints; i++) {
1105 if (addr == env->watchpoint[i].vaddr)
1106 return 0;
1108 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1109 return -1;
1111 i = env->nb_watchpoints++;
1112 env->watchpoint[i].vaddr = addr;
1113 tlb_flush_page(env, addr);
1114 /* FIXME: This flush is needed because of the hack to make memory ops
1115 terminate the TB. It can be removed once the proper IO trap and
1116 re-execute bits are in. */
1117 tb_flush(env);
1118 return i;
1121 /* Remove a watchpoint. */
1122 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1124 int i;
1126 for (i = 0; i < env->nb_watchpoints; i++) {
1127 if (addr == env->watchpoint[i].vaddr) {
1128 env->nb_watchpoints--;
1129 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1130 tlb_flush_page(env, addr);
1131 return 0;
1134 return -1;
1137 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1138 breakpoint is reached */
1139 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1141 #if defined(TARGET_HAS_ICE)
1142 int i;
1144 for(i = 0; i < env->nb_breakpoints; i++) {
1145 if (env->breakpoints[i] == pc)
1146 return 0;
1149 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1150 return -1;
1151 env->breakpoints[env->nb_breakpoints++] = pc;
1153 if (kvm_enabled())
1154 kvm_update_debugger(env);
1156 breakpoint_invalidate(env, pc);
1157 return 0;
1158 #else
1159 return -1;
1160 #endif
1163 /* remove a breakpoint */
1164 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1166 #if defined(TARGET_HAS_ICE)
1167 int i;
1168 for(i = 0; i < env->nb_breakpoints; i++) {
1169 if (env->breakpoints[i] == pc)
1170 goto found;
1172 return -1;
1173 found:
1174 env->nb_breakpoints--;
1175 if (i < env->nb_breakpoints)
1176 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1178 if (kvm_enabled())
1179 kvm_update_debugger(env);
1181 breakpoint_invalidate(env, pc);
1182 return 0;
1183 #else
1184 return -1;
1185 #endif
1188 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1189 CPU loop after each instruction */
1190 void cpu_single_step(CPUState *env, int enabled)
1192 #if defined(TARGET_HAS_ICE)
1193 if (env->singlestep_enabled != enabled) {
1194 env->singlestep_enabled = enabled;
1195 /* must flush all the translated code to avoid inconsistencies */
1196 /* XXX: only flush what is necessary */
1197 tb_flush(env);
1199 if (kvm_enabled())
1200 kvm_update_debugger(env);
1201 #endif
1204 /* enable or disable low levels log */
1205 void cpu_set_log(int log_flags)
1207 loglevel = log_flags;
1208 if (loglevel && !logfile) {
1209 logfile = fopen(logfilename, log_append ? "a" : "w");
1210 if (!logfile) {
1211 perror(logfilename);
1212 _exit(1);
1214 #if !defined(CONFIG_SOFTMMU)
1215 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1217 static uint8_t logfile_buf[4096];
1218 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1220 #else
1221 setvbuf(logfile, NULL, _IOLBF, 0);
1222 #endif
1223 log_append = 1;
1225 if (!loglevel && logfile) {
1226 fclose(logfile);
1227 logfile = NULL;
1231 void cpu_set_log_filename(const char *filename)
1233 logfilename = strdup(filename);
1234 if (logfile) {
1235 fclose(logfile);
1236 logfile = NULL;
1238 cpu_set_log(loglevel);
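/* Typical use (illustrative only; the path is made up, the flags come from
 * cpu_log_items below):
 *
 *   cpu_set_log_filename("/tmp/qemu-trace.log");
 *   cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC);
 *
 * cpu_set_log_filename() switches the file; cpu_set_log() enables the masks
 * and (re)opens the log file when logging becomes active. */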
1241 /* mask must never be zero, except for A20 change call */
1242 void cpu_interrupt(CPUState *env, int mask)
1244 TranslationBlock *tb;
1245 static int interrupt_lock;
1247 env->interrupt_request |= mask;
1248 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1249 kvm_update_interrupt_request(env);
1251 /* if the cpu is currently executing code, we must unlink it and
1252 all the potentially executing TBs */
1253 tb = env->current_tb;
1254 if (tb && !testandset(&interrupt_lock)) {
1255 env->current_tb = NULL;
1256 tb_reset_jump_recursive(tb);
1257 interrupt_lock = 0;
1261 void cpu_reset_interrupt(CPUState *env, int mask)
1263 env->interrupt_request &= ~mask;
1266 CPULogItem cpu_log_items[] = {
1267 { CPU_LOG_TB_OUT_ASM, "out_asm",
1268 "show generated host assembly code for each compiled TB" },
1269 { CPU_LOG_TB_IN_ASM, "in_asm",
1270 "show target assembly code for each compiled TB" },
1271 { CPU_LOG_TB_OP, "op",
1272 "show micro ops for each compiled TB" },
1273 #ifdef TARGET_I386
1274 { CPU_LOG_TB_OP_OPT, "op_opt",
1275 "show micro ops before eflags optimization" },
1276 #endif
1277 { CPU_LOG_INT, "int",
1278 "show interrupts/exceptions in short format" },
1279 { CPU_LOG_EXEC, "exec",
1280 "show trace before each executed TB (lots of logs)" },
1281 { CPU_LOG_TB_CPU, "cpu",
1282 "show CPU state before block translation" },
1283 #ifdef TARGET_I386
1284 { CPU_LOG_PCALL, "pcall",
1285 "show protected mode far calls/returns/exceptions" },
1286 #endif
1287 #ifdef DEBUG_IOPORT
1288 { CPU_LOG_IOPORT, "ioport",
1289 "show all i/o ports accesses" },
1290 #endif
1291 { 0, NULL, NULL },
1294 static int cmp1(const char *s1, int n, const char *s2)
1296 if (strlen(s2) != n)
1297 return 0;
1298 return memcmp(s1, s2, n) == 0;
1301 /* takes a comma-separated list of log masks. Returns 0 on error. */
1302 int cpu_str_to_log_mask(const char *str)
1304 CPULogItem *item;
1305 int mask;
1306 const char *p, *p1;
1308 p = str;
1309 mask = 0;
1310 for(;;) {
1311 p1 = strchr(p, ',');
1312 if (!p1)
1313 p1 = p + strlen(p);
1314 if(cmp1(p,p1-p,"all")) {
1315 for(item = cpu_log_items; item->mask != 0; item++) {
1316 mask |= item->mask;
1318 } else {
1319 for(item = cpu_log_items; item->mask != 0; item++) {
1320 if (cmp1(p, p1 - p, item->name))
1321 goto found;
1323 return 0;
1325 found:
1326 mask |= item->mask;
1327 if (*p1 != ',')
1328 break;
1329 p = p1 + 1;
1331 return mask;
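/* Example (illustrative): this is the parser behind the -d option string.
 * "in_asm,op" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" ORs every
 * entry of cpu_log_items together, and an unknown name makes the whole call
 * return 0:
 *
 *   int mask = cpu_str_to_log_mask("in_asm,op");
 *   if (mask)
 *       cpu_set_log(mask);
 */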
1334 void cpu_abort(CPUState *env, const char *fmt, ...)
1336 va_list ap;
1337 va_list ap2;
1339 va_start(ap, fmt);
1340 va_copy(ap2, ap);
1341 fprintf(stderr, "qemu: fatal: ");
1342 vfprintf(stderr, fmt, ap);
1343 fprintf(stderr, "\n");
1344 #ifdef TARGET_I386
1345 if(env->intercept & INTERCEPT_SVM_MASK) {
1346 /* most probably the virtual machine should not
1347 be shut down but rather caught by the VMM */
1348 vmexit(SVM_EXIT_SHUTDOWN, 0);
1350 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1351 #else
1352 cpu_dump_state(env, stderr, fprintf, 0);
1353 #endif
1354 if (logfile) {
1355 fprintf(logfile, "qemu: fatal: ");
1356 vfprintf(logfile, fmt, ap2);
1357 fprintf(logfile, "\n");
1358 #ifdef TARGET_I386
1359 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1360 #else
1361 cpu_dump_state(env, logfile, fprintf, 0);
1362 #endif
1363 fflush(logfile);
1364 fclose(logfile);
1366 va_end(ap2);
1367 va_end(ap);
1368 abort();
1371 CPUState *cpu_copy(CPUState *env)
1373 CPUState *new_env = cpu_init(env->cpu_model_str);
1374 /* preserve chaining and index */
1375 CPUState *next_cpu = new_env->next_cpu;
1376 int cpu_index = new_env->cpu_index;
1377 memcpy(new_env, env, sizeof(CPUState));
1378 new_env->next_cpu = next_cpu;
1379 new_env->cpu_index = cpu_index;
1380 return new_env;
1383 #if !defined(CONFIG_USER_ONLY)
1385 /* NOTE: if flush_global is true, also flush global entries (not
1386 implemented yet) */
1387 void tlb_flush(CPUState *env, int flush_global)
1389 int i;
1391 #if defined(DEBUG_TLB)
1392 printf("tlb_flush:\n");
1393 #endif
1394 /* must reset current TB so that interrupts cannot modify the
1395 links while we are modifying them */
1396 env->current_tb = NULL;
1398 for(i = 0; i < CPU_TLB_SIZE; i++) {
1399 env->tlb_table[0][i].addr_read = -1;
1400 env->tlb_table[0][i].addr_write = -1;
1401 env->tlb_table[0][i].addr_code = -1;
1402 env->tlb_table[1][i].addr_read = -1;
1403 env->tlb_table[1][i].addr_write = -1;
1404 env->tlb_table[1][i].addr_code = -1;
1405 #if (NB_MMU_MODES >= 3)
1406 env->tlb_table[2][i].addr_read = -1;
1407 env->tlb_table[2][i].addr_write = -1;
1408 env->tlb_table[2][i].addr_code = -1;
1409 #if (NB_MMU_MODES == 4)
1410 env->tlb_table[3][i].addr_read = -1;
1411 env->tlb_table[3][i].addr_write = -1;
1412 env->tlb_table[3][i].addr_code = -1;
1413 #endif
1414 #endif
1417 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1419 #if !defined(CONFIG_SOFTMMU)
1420 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1421 #endif
1422 #ifdef USE_KQEMU
1423 if (env->kqemu_enabled) {
1424 kqemu_flush(env, flush_global);
1426 #endif
1427 tlb_flush_count++;
1430 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1432 if (addr == (tlb_entry->addr_read &
1433 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1434 addr == (tlb_entry->addr_write &
1435 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1436 addr == (tlb_entry->addr_code &
1437 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1438 tlb_entry->addr_read = -1;
1439 tlb_entry->addr_write = -1;
1440 tlb_entry->addr_code = -1;
1444 void tlb_flush_page(CPUState *env, target_ulong addr)
1446 int i;
1447 TranslationBlock *tb;
1449 #if defined(DEBUG_TLB)
1450 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1451 #endif
1452 /* must reset current TB so that interrupts cannot modify the
1453 links while we are modifying them */
1454 env->current_tb = NULL;
1456 addr &= TARGET_PAGE_MASK;
1457 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1458 tlb_flush_entry(&env->tlb_table[0][i], addr);
1459 tlb_flush_entry(&env->tlb_table[1][i], addr);
1460 #if (NB_MMU_MODES >= 3)
1461 tlb_flush_entry(&env->tlb_table[2][i], addr);
1462 #if (NB_MMU_MODES == 4)
1463 tlb_flush_entry(&env->tlb_table[3][i], addr);
1464 #endif
1465 #endif
1467 /* Discard jump cache entries for any tb which might potentially
1468 overlap the flushed page. */
1469 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1470 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1472 i = tb_jmp_cache_hash_page(addr);
1473 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1475 #if !defined(CONFIG_SOFTMMU)
1476 if (addr < MMAP_AREA_END)
1477 munmap((void *)addr, TARGET_PAGE_SIZE);
1478 #endif
1479 #ifdef USE_KQEMU
1480 if (env->kqemu_enabled) {
1481 kqemu_flush_page(env, addr);
1483 #endif
1486 /* update the TLBs so that writes to code in the virtual page 'addr'
1487 can be detected */
1488 static void tlb_protect_code(ram_addr_t ram_addr)
1490 cpu_physical_memory_reset_dirty(ram_addr,
1491 ram_addr + TARGET_PAGE_SIZE,
1492 CODE_DIRTY_FLAG);
1495 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1496 tested for self modifying code */
1497 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1498 target_ulong vaddr)
1500 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
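/* Note added for clarity (not in the original file): phys_ram_dirty keeps
 * one byte per target page.  Individual bits such as CODE_DIRTY_FLAG each
 * track one consumer of the dirty information, a page counts as fully dirty
 * when the byte is 0xff, and clearing a bit is what re-arms the
 * IO_MEM_NOTDIRTY slow path installed by tlb_reset_dirty_range() below.
 * tlb_protect_code() above is exactly such a clear:
 *
 *   cpu_physical_memory_reset_dirty(ram_addr, ram_addr + TARGET_PAGE_SIZE,
 *                                   CODE_DIRTY_FLAG);
 */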
1503 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1504 unsigned long start, unsigned long length)
1506 unsigned long addr;
1507 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1508 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1509 if ((addr - start) < length) {
1510 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1515 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1516 int dirty_flags)
1518 CPUState *env;
1519 unsigned long length, start1;
1520 int i, mask, len;
1521 uint8_t *p;
1523 start &= TARGET_PAGE_MASK;
1524 end = TARGET_PAGE_ALIGN(end);
1526 length = end - start;
1527 if (length == 0)
1528 return;
1529 len = length >> TARGET_PAGE_BITS;
1530 #ifdef USE_KQEMU
1531 /* XXX: should not depend on cpu context */
1532 env = first_cpu;
1533 if (env->kqemu_enabled) {
1534 ram_addr_t addr;
1535 addr = start;
1536 for(i = 0; i < len; i++) {
1537 kqemu_set_notdirty(env, addr);
1538 addr += TARGET_PAGE_SIZE;
1541 #endif
1542 mask = ~dirty_flags;
1543 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1544 for(i = 0; i < len; i++)
1545 p[i] &= mask;
1547 /* we modify the TLB cache so that the dirty bit will be set again
1548 when accessing the range */
1549 start1 = start + (unsigned long)phys_ram_base;
1550 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1551 for(i = 0; i < CPU_TLB_SIZE; i++)
1552 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1553 for(i = 0; i < CPU_TLB_SIZE; i++)
1554 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1555 #if (NB_MMU_MODES >= 3)
1556 for(i = 0; i < CPU_TLB_SIZE; i++)
1557 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1558 #if (NB_MMU_MODES == 4)
1559 for(i = 0; i < CPU_TLB_SIZE; i++)
1560 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1561 #endif
1562 #endif
1565 #if !defined(CONFIG_SOFTMMU)
1566 /* XXX: this is expensive */
1568 VirtPageDesc *p;
1569 int j;
1570 target_ulong addr;
1572 for(i = 0; i < L1_SIZE; i++) {
1573 p = l1_virt_map[i];
1574 if (p) {
1575 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1576 for(j = 0; j < L2_SIZE; j++) {
1577 if (p->valid_tag == virt_valid_tag &&
1578 p->phys_addr >= start && p->phys_addr < end &&
1579 (p->prot & PROT_WRITE)) {
1580 if (addr < MMAP_AREA_END) {
1581 mprotect((void *)addr, TARGET_PAGE_SIZE,
1582 p->prot & ~PROT_WRITE);
1585 addr += TARGET_PAGE_SIZE;
1586 p++;
1591 #endif
1594 int cpu_physical_memory_set_dirty_tracking(int enable)
1596 int r=0;
1598 if (kvm_enabled())
1599 r = kvm_physical_memory_set_dirty_tracking(enable);
1600 in_migration = enable;
1601 return r;
1604 int cpu_physical_memory_get_dirty_tracking(void)
1606 return in_migration;
1609 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1611 ram_addr_t ram_addr;
1613 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1614 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1615 tlb_entry->addend - (unsigned long)phys_ram_base;
1616 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1617 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1622 /* update the TLB according to the current state of the dirty bits */
1623 void cpu_tlb_update_dirty(CPUState *env)
1625 int i;
1626 for(i = 0; i < CPU_TLB_SIZE; i++)
1627 tlb_update_dirty(&env->tlb_table[0][i]);
1628 for(i = 0; i < CPU_TLB_SIZE; i++)
1629 tlb_update_dirty(&env->tlb_table[1][i]);
1630 #if (NB_MMU_MODES >= 3)
1631 for(i = 0; i < CPU_TLB_SIZE; i++)
1632 tlb_update_dirty(&env->tlb_table[2][i]);
1633 #if (NB_MMU_MODES == 4)
1634 for(i = 0; i < CPU_TLB_SIZE; i++)
1635 tlb_update_dirty(&env->tlb_table[3][i]);
1636 #endif
1637 #endif
1640 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1641 unsigned long start)
1643 unsigned long addr;
1644 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1645 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1646 if (addr == start) {
1647 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1652 /* update the TLB corresponding to virtual page vaddr and phys addr
1653 addr so that it is no longer dirty */
1654 static inline void tlb_set_dirty(CPUState *env,
1655 unsigned long addr, target_ulong vaddr)
1657 int i;
1659 addr &= TARGET_PAGE_MASK;
1660 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1661 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1662 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1663 #if (NB_MMU_MODES >= 3)
1664 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1665 #if (NB_MMU_MODES == 4)
1666 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1667 #endif
1668 #endif
1671 /* add a new TLB entry. At most one entry for a given virtual address
1672 is permitted. Return 0 if OK or 2 if the page could not be mapped
1673 (can only happen in non SOFTMMU mode for I/O pages or pages
1674 conflicting with the host address space). */
1675 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1676 target_phys_addr_t paddr, int prot,
1677 int mmu_idx, int is_softmmu)
1679 PhysPageDesc *p;
1680 unsigned long pd;
1681 unsigned int index;
1682 target_ulong address;
1683 target_phys_addr_t addend;
1684 int ret;
1685 CPUTLBEntry *te;
1686 int i;
1688 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1689 if (!p) {
1690 pd = IO_MEM_UNASSIGNED;
1691 } else {
1692 pd = p->phys_offset;
1694 #if defined(DEBUG_TLB)
1695 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1696 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1697 #endif
1699 ret = 0;
1700 #if !defined(CONFIG_SOFTMMU)
1701 if (is_softmmu)
1702 #endif
1704 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1705 /* IO memory case */
1706 address = vaddr | pd;
1707 addend = paddr;
1708 } else {
1709 /* standard memory */
1710 address = vaddr;
1711 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1714 /* Make accesses to pages with watchpoints go via the
1715 watchpoint trap routines. */
1716 for (i = 0; i < env->nb_watchpoints; i++) {
1717 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1718 if (address & ~TARGET_PAGE_MASK) {
1719 env->watchpoint[i].addend = 0;
1720 address = vaddr | io_mem_watch;
1721 } else {
1722 env->watchpoint[i].addend = pd - paddr +
1723 (unsigned long) phys_ram_base;
1724 /* TODO: Figure out how to make read watchpoints coexist
1725 with code. */
1726 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1731 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1732 addend -= vaddr;
1733 te = &env->tlb_table[mmu_idx][index];
1734 te->addend = addend;
1735 if (prot & PAGE_READ) {
1736 te->addr_read = address;
1737 } else {
1738 te->addr_read = -1;
1740 if (prot & PAGE_EXEC) {
1741 te->addr_code = address;
1742 } else {
1743 te->addr_code = -1;
1745 if (prot & PAGE_WRITE) {
1746 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1747 (pd & IO_MEM_ROMD)) {
1748 /* write access calls the I/O callback */
1749 te->addr_write = vaddr |
1750 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1751 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1752 !cpu_physical_memory_is_dirty(pd)) {
1753 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1754 } else {
1755 te->addr_write = address;
1757 } else {
1758 te->addr_write = -1;
1761 #if !defined(CONFIG_SOFTMMU)
1762 else {
1763 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1764 /* IO access: no mapping is done as it will be handled by the
1765 soft MMU */
1766 if (!(env->hflags & HF_SOFTMMU_MASK))
1767 ret = 2;
1768 } else {
1769 void *map_addr;
1771 if (vaddr >= MMAP_AREA_END) {
1772 ret = 2;
1773 } else {
1774 if (prot & PROT_WRITE) {
1775 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1776 #if defined(TARGET_HAS_SMC) || 1
1777 first_tb ||
1778 #endif
1779 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1780 !cpu_physical_memory_is_dirty(pd))) {
1781 /* ROM: we do as if code was inside */
1782 /* if code is present, we only map as read only and save the
1783 original mapping */
1784 VirtPageDesc *vp;
1786 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1787 vp->phys_addr = pd;
1788 vp->prot = prot;
1789 vp->valid_tag = virt_valid_tag;
1790 prot &= ~PAGE_WRITE;
1793 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1794 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1795 if (map_addr == MAP_FAILED) {
1796 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1797 paddr, vaddr);
1802 #endif
1803 return ret;
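/* Sketch of the resulting softmmu TLB entry (illustrative, not part of the
 * original file).  For plain RAM the addr_* fields hold the clean virtual
 * page address and 'addend' turns a guest virtual address directly into a
 * host pointer:
 *
 *   host_ptr = (uint8_t *)(addr + env->tlb_table[mmu_idx][index].addend);
 *
 * For I/O pages the low bits of addr_read/addr_write carry the io_mem index
 * instead, so the fast-path tag comparison in the softmmu templates fails
 * and the access is routed through io_mem_read/io_mem_write. */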
1806 /* called from signal handler: invalidate the code and unprotect the
1807 page. Return TRUE if the fault was successfully handled. */
1808 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1810 #if !defined(CONFIG_SOFTMMU)
1811 VirtPageDesc *vp;
1813 #if defined(DEBUG_TLB)
1814 printf("page_unprotect: addr=0x%08x\n", addr);
1815 #endif
1816 addr &= TARGET_PAGE_MASK;
1818 /* if it is not mapped, no need to worry here */
1819 if (addr >= MMAP_AREA_END)
1820 return 0;
1821 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1822 if (!vp)
1823 return 0;
1824 /* NOTE: in this case, validate_tag is _not_ tested as it
1825 validates only the code TLB */
1826 if (vp->valid_tag != virt_valid_tag)
1827 return 0;
1828 if (!(vp->prot & PAGE_WRITE))
1829 return 0;
1830 #if defined(DEBUG_TLB)
1831 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1832 addr, vp->phys_addr, vp->prot);
1833 #endif
1834 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1835 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1836 (unsigned long)addr, vp->prot);
1837 /* set the dirty bit */
1838 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1839 /* flush the code inside */
1840 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1841 return 1;
1842 #else
1843 return 0;
1844 #endif
1847 #else
1849 void tlb_flush(CPUState *env, int flush_global)
1853 void tlb_flush_page(CPUState *env, target_ulong addr)
1857 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1858 target_phys_addr_t paddr, int prot,
1859 int mmu_idx, int is_softmmu)
1861 return 0;
1864 /* dump memory mappings */
1865 void page_dump(FILE *f)
1867 unsigned long start, end;
1868 int i, j, prot, prot1;
1869 PageDesc *p;
1871 fprintf(f, "%-8s %-8s %-8s %s\n",
1872 "start", "end", "size", "prot");
1873 start = -1;
1874 end = -1;
1875 prot = 0;
1876 for(i = 0; i <= L1_SIZE; i++) {
1877 if (i < L1_SIZE)
1878 p = l1_map[i];
1879 else
1880 p = NULL;
1881 for(j = 0;j < L2_SIZE; j++) {
1882 if (!p)
1883 prot1 = 0;
1884 else
1885 prot1 = p[j].flags;
1886 if (prot1 != prot) {
1887 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1888 if (start != -1) {
1889 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1890 start, end, end - start,
1891 prot & PAGE_READ ? 'r' : '-',
1892 prot & PAGE_WRITE ? 'w' : '-',
1893 prot & PAGE_EXEC ? 'x' : '-');
1895 if (prot1 != 0)
1896 start = end;
1897 else
1898 start = -1;
1899 prot = prot1;
1901 if (!p)
1902 break;
1907 int page_get_flags(target_ulong address)
1909 PageDesc *p;
1911 p = page_find(address >> TARGET_PAGE_BITS);
1912 if (!p)
1913 return 0;
1914 return p->flags;
1917 /* modify the flags of a page and invalidate the code if
1918 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1919 depending on PAGE_WRITE */
1920 void page_set_flags(target_ulong start, target_ulong end, int flags)
1922 PageDesc *p;
1923 target_ulong addr;
1925 start = start & TARGET_PAGE_MASK;
1926 end = TARGET_PAGE_ALIGN(end);
1927 if (flags & PAGE_WRITE)
1928 flags |= PAGE_WRITE_ORG;
1929 spin_lock(&tb_lock);
1930 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1931 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1932 /* if the write protection is set, then we invalidate the code
1933 inside */
1934 if (!(p->flags & PAGE_WRITE) &&
1935 (flags & PAGE_WRITE) &&
1936 p->first_tb) {
1937 tb_invalidate_phys_page(addr, 0, NULL);
1939 p->flags = flags;
1941 spin_unlock(&tb_lock);
1944 int page_check_range(target_ulong start, target_ulong len, int flags)
1946 PageDesc *p;
1947 target_ulong end;
1948 target_ulong addr;
1950 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1951 start = start & TARGET_PAGE_MASK;
1953 if( end < start )
1954 /* we've wrapped around */
1955 return -1;
1956 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1957 p = page_find(addr >> TARGET_PAGE_BITS);
1958 if( !p )
1959 return -1;
1960 if( !(p->flags & PAGE_VALID) )
1961 return -1;
1963 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1964 return -1;
1965 if (flags & PAGE_WRITE) {
1966 if (!(p->flags & PAGE_WRITE_ORG))
1967 return -1;
1968 /* unprotect the page if it was put read-only because it
1969 contains translated code */
1970 if (!(p->flags & PAGE_WRITE)) {
1971 if (!page_unprotect(addr, 0, NULL))
1972 return -1;
1974 return 0;
1977 return 0;
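/* Illustrative use from user-mode emulation (guest_addr/len are hypothetical
 * here; the syscall layer validates guest buffers this way):
 *
 *   if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
 *       return -TARGET_EFAULT;
 *
 * A page that only lost PAGE_WRITE because it contains translated code is
 * transparently unprotected by the call. */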
1980 /* called from signal handler: invalidate the code and unprotect the
1981 page. Return TRUE if the fault was successfully handled. */
1982 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1984 unsigned int page_index, prot, pindex;
1985 PageDesc *p, *p1;
1986 target_ulong host_start, host_end, addr;
1988 host_start = address & qemu_host_page_mask;
1989 page_index = host_start >> TARGET_PAGE_BITS;
1990 p1 = page_find(page_index);
1991 if (!p1)
1992 return 0;
1993 host_end = host_start + qemu_host_page_size;
1994 p = p1;
1995 prot = 0;
1996 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1997 prot |= p->flags;
1998 p++;
2000 /* if the page was really writable, then we change its
2001 protection back to writable */
2002 if (prot & PAGE_WRITE_ORG) {
2003 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2004 if (!(p1[pindex].flags & PAGE_WRITE)) {
2005 mprotect((void *)g2h(host_start), qemu_host_page_size,
2006 (prot & PAGE_BITS) | PAGE_WRITE);
2007 p1[pindex].flags |= PAGE_WRITE;
2008 /* and since the content will be modified, we must invalidate
2009 the corresponding translated code. */
2010 tb_invalidate_phys_page(address, pc, puc);
2011 #ifdef DEBUG_TB_CHECK
2012 tb_invalidate_check(address);
2013 #endif
2014 return 1;
2017 return 0;
2020 static inline void tlb_set_dirty(CPUState *env,
2021 unsigned long addr, target_ulong vaddr)
2024 #endif /* defined(CONFIG_USER_ONLY) */
2026 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2027 int memory);
2028 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2029 int orig_memory);
2030 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2031 need_subpage) \
2032 do { \
2033 if (addr > start_addr) \
2034 start_addr2 = 0; \
2035 else { \
2036 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2037 if (start_addr2 > 0) \
2038 need_subpage = 1; \
2039 } \
2040 \
2041 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2042 end_addr2 = TARGET_PAGE_SIZE - 1; \
2043 else { \
2044 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2045 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2046 need_subpage = 1; \
2047 } \
2048 } while (0)
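/* Worked example of CHECK_SUBPAGE (illustrative, 4 KiB target pages
 * assumed): registering a region starting at start_addr = 0x1080 while the
 * loop below is at addr = 0x1000 gives start_addr2 = 0x080; if the region
 * also ends before the end of that page (0x1fff), end_addr2 is smaller than
 * TARGET_PAGE_SIZE - 1.  Either condition sets need_subpage, and
 * cpu_register_physical_memory() then splits the page with subpage_init()
 * and subpage_register(). */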
2050 /* register physical memory. 'size' must be a multiple of the target
2051 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2052 io memory page */
2053 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2054 unsigned long size,
2055 unsigned long phys_offset)
2057 target_phys_addr_t addr, end_addr;
2058 PhysPageDesc *p;
2059 CPUState *env;
2060 unsigned long orig_size = size;
2061 void *subpage;
2063 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2064 end_addr = start_addr + (target_phys_addr_t)size;
2065 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2066 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2067 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2068 unsigned long orig_memory = p->phys_offset;
2069 target_phys_addr_t start_addr2, end_addr2;
2070 int need_subpage = 0;
2072 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2073 need_subpage);
2074 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2075 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2076 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2077 &p->phys_offset, orig_memory);
2078 } else {
2079 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2080 >> IO_MEM_SHIFT];
2082 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2083 } else {
2084 p->phys_offset = phys_offset;
2085 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2086 (phys_offset & IO_MEM_ROMD))
2087 phys_offset += TARGET_PAGE_SIZE;
2089 } else {
2090 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2091 p->phys_offset = phys_offset;
2092 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2093 (phys_offset & IO_MEM_ROMD))
2094 phys_offset += TARGET_PAGE_SIZE;
2095 else {
2096 target_phys_addr_t start_addr2, end_addr2;
2097 int need_subpage = 0;
2099 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2100 end_addr2, need_subpage);
2102 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2103 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2104 &p->phys_offset, IO_MEM_UNASSIGNED);
2105 subpage_register(subpage, start_addr2, end_addr2,
2106 phys_offset);
2112 /* since each CPU stores ram addresses in its TLB cache, we must
2113 reset the modified entries */
2114 /* XXX: slow ! */
2115 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2116 tlb_flush(env, 1);
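/* Illustrative sketch, not compiled: typical use of the function above from
   board setup code.  The addresses, sizes and the io_index argument are
   hypothetical, not taken from this file. */
#if 0
static void example_map(int example_mmio_io_index)
{
    /* RAM pages: phys_offset is the RAM offset returned by qemu_ram_alloc() */
    ram_addr_t ram_off = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_off | IO_MEM_RAM);

    /* io page: phys_offset carries the value returned by
       cpu_register_io_memory(); a page-sized, page-aligned region avoids
       the subpage path */
    cpu_register_physical_memory(0xd0000000, TARGET_PAGE_SIZE,
                                 example_mmio_io_index);
}
#endif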
2120 /* XXX: temporary until new memory mapping API */
2121 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2123 PhysPageDesc *p;
2125 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2126 if (!p)
2127 return IO_MEM_UNASSIGNED;
2128 return p->phys_offset;
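/* Illustrative sketch, not compiled: decoding the descriptor returned above.
   The low (intra-page) bits select an io_mem slot; IO_MEM_RAM (0) means
   plain RAM.  The helper name is hypothetical. */
#if 0
static int example_is_ram(target_phys_addr_t addr)
{
    unsigned long pd = cpu_get_physical_page_desc(addr);

    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}
#endif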
2131 /* XXX: better than nothing */
2132 ram_addr_t qemu_ram_alloc(unsigned long size)
2134 ram_addr_t addr;
2135 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2136 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
2137 size, (unsigned long)phys_ram_size);
2138 abort();
2140 addr = phys_ram_alloc_offset;
2141 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2142 return addr;
2145 void qemu_ram_free(ram_addr_t addr)
2149 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2151 #ifdef DEBUG_UNASSIGNED
2152 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2153 #endif
2154 #ifdef TARGET_SPARC
2155 do_unassigned_access(addr, 0, 0, 0);
2156 #elif TARGET_CRIS
2157 do_unassigned_access(addr, 0, 0, 0);
2158 #endif
2159 return 0;
2162 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2164 #ifdef DEBUG_UNASSIGNED
2165 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2166 #endif
2167 #ifdef TARGET_SPARC
2168 do_unassigned_access(addr, 1, 0, 0);
2169 #elif TARGET_CRIS
2170 do_unassigned_access(addr, 1, 0, 0);
2171 #endif
2174 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2175 unassigned_mem_readb,
2176 unassigned_mem_readb,
2177 unassigned_mem_readb,
2180 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2181 unassigned_mem_writeb,
2182 unassigned_mem_writeb,
2183 unassigned_mem_writeb,
2186 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2188 unsigned long ram_addr;
2189 int dirty_flags;
2190 ram_addr = addr - (unsigned long)phys_ram_base;
2191 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2192 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2193 #if !defined(CONFIG_USER_ONLY)
2194 tb_invalidate_phys_page_fast(ram_addr, 1);
2195 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2196 #endif
2198 stb_p((uint8_t *)(long)addr, val);
2199 #ifdef USE_KQEMU
2200 if (cpu_single_env->kqemu_enabled &&
2201 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2202 kqemu_modify_page(cpu_single_env, ram_addr);
2203 #endif
2204 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2205 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2206 /* we remove the notdirty callback only if the code has been
2207 flushed */
2208 if (dirty_flags == 0xff)
2209 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2212 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2214 unsigned long ram_addr;
2215 int dirty_flags;
2216 ram_addr = addr - (unsigned long)phys_ram_base;
2217 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2218 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2219 #if !defined(CONFIG_USER_ONLY)
2220 tb_invalidate_phys_page_fast(ram_addr, 2);
2221 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2222 #endif
2224 stw_p((uint8_t *)(long)addr, val);
2225 #ifdef USE_KQEMU
2226 if (cpu_single_env->kqemu_enabled &&
2227 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2228 kqemu_modify_page(cpu_single_env, ram_addr);
2229 #endif
2230 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2231 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2232 /* we remove the notdirty callback only if the code has been
2233 flushed */
2234 if (dirty_flags == 0xff)
2235 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2238 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2240 unsigned long ram_addr;
2241 int dirty_flags;
2242 ram_addr = addr - (unsigned long)phys_ram_base;
2243 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2244 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2245 #if !defined(CONFIG_USER_ONLY)
2246 tb_invalidate_phys_page_fast(ram_addr, 4);
2247 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2248 #endif
2250 stl_p((uint8_t *)(long)addr, val);
2251 #ifdef USE_KQEMU
2252 if (cpu_single_env->kqemu_enabled &&
2253 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2254 kqemu_modify_page(cpu_single_env, ram_addr);
2255 #endif
2256 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2257 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2258 /* we remove the notdirty callback only if the code has been
2259 flushed */
2260 if (dirty_flags == 0xff)
2261 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2264 static CPUReadMemoryFunc *error_mem_read[3] = {
2265 NULL, /* never used */
2266 NULL, /* never used */
2267 NULL, /* never used */
2270 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2271 notdirty_mem_writeb,
2272 notdirty_mem_writew,
2273 notdirty_mem_writel,
2276 #if defined(CONFIG_SOFTMMU)
2277 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2278 so these check for a hit then pass through to the normal out-of-line
2279 phys routines. */
2280 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2282 return ldub_phys(addr);
2285 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2287 return lduw_phys(addr);
2290 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2292 return ldl_phys(addr);
2295 /* Generate a debug exception if a watchpoint has been hit.
2296 Returns the real physical address of the access. addr will be a host
2297 address in case of a RAM location. */
2298 static target_ulong check_watchpoint(target_phys_addr_t addr)
2300 CPUState *env = cpu_single_env;
2301 target_ulong watch;
2302 target_ulong retaddr;
2303 int i;
2305 retaddr = addr;
2306 for (i = 0; i < env->nb_watchpoints; i++) {
2307 watch = env->watchpoint[i].vaddr;
2308 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2309 retaddr = addr - env->watchpoint[i].addend;
2310 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2311 cpu_single_env->watchpoint_hit = i + 1;
2312 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2313 break;
2317 return retaddr;
2320 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2321 uint32_t val)
2323 addr = check_watchpoint(addr);
2324 stb_phys(addr, val);
2327 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2328 uint32_t val)
2330 addr = check_watchpoint(addr);
2331 stw_phys(addr, val);
2334 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2335 uint32_t val)
2337 addr = check_watchpoint(addr);
2338 stl_phys(addr, val);
2341 static CPUReadMemoryFunc *watch_mem_read[3] = {
2342 watch_mem_readb,
2343 watch_mem_readw,
2344 watch_mem_readl,
2347 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2348 watch_mem_writeb,
2349 watch_mem_writew,
2350 watch_mem_writel,
2352 #endif
2354 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2355 unsigned int len)
2357 uint32_t ret;
2358 unsigned int idx;
2360 idx = SUBPAGE_IDX(addr - mmio->base);
2361 #if defined(DEBUG_SUBPAGE)
2362 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2363 mmio, len, addr, idx);
2364 #endif
2365 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2367 return ret;
2370 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2371 uint32_t value, unsigned int len)
2373 unsigned int idx;
2375 idx = SUBPAGE_IDX(addr - mmio->base);
2376 #if defined(DEBUG_SUBPAGE)
2377 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2378 mmio, len, addr, idx, value);
2379 #endif
2380 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2383 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2387 #endif
2389 return subpage_readlen(opaque, addr, 0);
2392 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2393 uint32_t value)
2395 #if defined(DEBUG_SUBPAGE)
2396 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2397 #endif
2398 subpage_writelen(opaque, addr, value, 0);
2401 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2405 #endif
2407 return subpage_readlen(opaque, addr, 1);
2410 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2411 uint32_t value)
2413 #if defined(DEBUG_SUBPAGE)
2414 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2415 #endif
2416 subpage_writelen(opaque, addr, value, 1);
2419 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2421 #if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2423 #endif
2425 return subpage_readlen(opaque, addr, 2);
2428 static void subpage_writel (void *opaque,
2429 target_phys_addr_t addr, uint32_t value)
2431 #if defined(DEBUG_SUBPAGE)
2432 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2433 #endif
2434 subpage_writelen(opaque, addr, value, 2);
2437 static CPUReadMemoryFunc *subpage_read[] = {
2438 &subpage_readb,
2439 &subpage_readw,
2440 &subpage_readl,
2443 static CPUWriteMemoryFunc *subpage_write[] = {
2444 &subpage_writeb,
2445 &subpage_writew,
2446 &subpage_writel,
2449 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2450 int memory)
2452 int idx, eidx;
2453 unsigned int i;
2455 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2456 return -1;
2457 idx = SUBPAGE_IDX(start);
2458 eidx = SUBPAGE_IDX(end);
2459 #if defined(DEBUG_SUBPAGE)
2460 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2461 mmio, start, end, idx, eidx, memory);
2462 #endif
2463 memory >>= IO_MEM_SHIFT;
2464 for (; idx <= eidx; idx++) {
2465 for (i = 0; i < 4; i++) {
2466 if (io_mem_read[memory][i]) {
2467 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2468 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2470 if (io_mem_write[memory][i]) {
2471 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2472 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2477 return 0;
2480 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2481 int orig_memory)
2483 subpage_t *mmio;
2484 int subpage_memory;
2486 mmio = qemu_mallocz(sizeof(subpage_t));
2487 if (mmio != NULL) {
2488 mmio->base = base;
2489 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2490 #if defined(DEBUG_SUBPAGE)
2491 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2492 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2493 #endif
2494 *phys = subpage_memory | IO_MEM_SUBPAGE;
2495 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2498 return mmio;
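/* Illustrative sketch, not compiled: two sub-page sized io regions sharing
   one target page.  The first registration creates the subpage via
   subpage_init(); the second finds IO_MEM_SUBPAGE already set and only adds
   its range with subpage_register().  io_a and io_b are hypothetical values
   obtained from cpu_register_io_memory(). */
#if 0
static void example_shared_page(int io_a, int io_b)
{
    cpu_register_physical_memory(0xe0000000, 0x100, io_a); /* bytes 0x000-0x0ff */
    cpu_register_physical_memory(0xe0000100, 0x100, io_b); /* bytes 0x100-0x1ff */
}
#endif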
2501 static int get_free_io_mem_idx(void)
2503 int i;
2505 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2506 if (!io_mem_used[i]) {
2507 io_mem_used[i] = 1;
2508 return i;
2511 return -1;
2514 static void io_mem_init(void)
2516 int i;
2518 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2519 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2520 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2521 for (i=0; i<5; i++)
2522 io_mem_used[i] = 0;
2524 #if defined(CONFIG_SOFTMMU)
2525 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2526 watch_mem_write, NULL);
2527 #endif
2528 /* alloc dirty bits array */
2529 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2530 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2533 /* mem_read and mem_write are arrays of functions containing the
2534 function to access byte (index 0), word (index 1) and dword (index
2535 2). Functions can be omitted with a NULL function pointer. The
2536 registered functions may be modified dynamically later.
2537 If io_index is non-zero, the corresponding io zone is
2538 modified. If it is zero, a new io zone is allocated. The return
2539 value can be used with cpu_register_physical_memory(); (-1) is
2540 returned on error. */
2541 int cpu_register_io_memory(int io_index,
2542 CPUReadMemoryFunc **mem_read,
2543 CPUWriteMemoryFunc **mem_write,
2544 void *opaque)
2546 int i, subwidth = 0;
2548 if (io_index <= 0) {
2549 io_index = get_free_io_mem_idx();
2550 if (io_index == -1)
2551 return io_index;
2552 } else {
2553 if (io_index >= IO_MEM_NB_ENTRIES)
2554 return -1;
2557 for(i = 0;i < 3; i++) {
2558 if (!mem_read[i] || !mem_write[i])
2559 subwidth = IO_MEM_SUBWIDTH;
2560 io_mem_read[io_index][i] = mem_read[i];
2561 io_mem_write[io_index][i] = mem_write[i];
2563 io_mem_opaque[io_index] = opaque;
2564 return (io_index << IO_MEM_SHIFT) | subwidth;
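/* Illustrative sketch, not compiled: registering a set of mmio handlers for
   a device.  The device state, handlers and names are hypothetical; only
   the registration call itself is real. */
#if 0
typedef struct ExampleState {
    uint32_t reg;
} ExampleState;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleState *s = opaque;
    return s->reg;                      /* same register at every offset */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_readl, example_mmio_readl, example_mmio_readl, /* b, w, l */
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_writel, example_mmio_writel, example_mmio_writel,
};

static int example_register(ExampleState *s)
{
    /* io_index 0 allocates a fresh slot; pass the return value to
       cpu_register_physical_memory() as phys_offset */
    return cpu_register_io_memory(0, example_mmio_read, example_mmio_write, s);
}
#endif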
2567 void cpu_unregister_io_memory(int io_table_address)
2569 int i;
2570 int io_index = io_table_address >> IO_MEM_SHIFT;
2572 for (i=0;i < 3; i++) {
2573 io_mem_read[io_index][i] = unassigned_mem_read[i];
2574 io_mem_write[io_index][i] = unassigned_mem_write[i];
2576 io_mem_opaque[io_index] = NULL;
2577 io_mem_used[io_index] = 0;
2580 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2582 return io_mem_write[io_index >> IO_MEM_SHIFT];
2585 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2587 return io_mem_read[io_index >> IO_MEM_SHIFT];
2590 /* physical memory access (slow version, mainly for debug) */
2591 #if defined(CONFIG_USER_ONLY)
2592 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2593 int len, int is_write)
2595 int l, flags;
2596 target_ulong page;
2597 void * p;
2599 while (len > 0) {
2600 page = addr & TARGET_PAGE_MASK;
2601 l = (page + TARGET_PAGE_SIZE) - addr;
2602 if (l > len)
2603 l = len;
2604 flags = page_get_flags(page);
2605 if (!(flags & PAGE_VALID))
2606 return;
2607 if (is_write) {
2608 if (!(flags & PAGE_WRITE))
2609 return;
2610 /* XXX: this code should not depend on lock_user */
2611 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2612 /* FIXME - should this return an error rather than just fail? */
2613 return;
2614 memcpy(p, buf, len);
2615 unlock_user(p, addr, len);
2616 } else {
2617 if (!(flags & PAGE_READ))
2618 return;
2619 /* XXX: this code should not depend on lock_user */
2620 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2621 /* FIXME - should this return an error rather than just fail? */
2622 return;
2623 memcpy(buf, p, len);
2624 unlock_user(p, addr, 0);
2626 len -= l;
2627 buf += l;
2628 addr += l;
2632 #else
2633 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2634 int len, int is_write)
2636 int l, io_index;
2637 uint8_t *ptr;
2638 uint32_t val;
2639 target_phys_addr_t page;
2640 unsigned long pd;
2641 PhysPageDesc *p;
2643 while (len > 0) {
2644 page = addr & TARGET_PAGE_MASK;
2645 l = (page + TARGET_PAGE_SIZE) - addr;
2646 if (l > len)
2647 l = len;
2648 p = phys_page_find(page >> TARGET_PAGE_BITS);
2649 if (!p) {
2650 pd = IO_MEM_UNASSIGNED;
2651 } else {
2652 pd = p->phys_offset;
2655 if (is_write) {
2656 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2657 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2658 /* XXX: could force cpu_single_env to NULL to avoid
2659 potential bugs */
2660 if (l >= 4 && ((addr & 3) == 0)) {
2661 /* 32 bit write access */
2662 val = ldl_p(buf);
2663 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2664 l = 4;
2665 } else if (l >= 2 && ((addr & 1) == 0)) {
2666 /* 16 bit write access */
2667 val = lduw_p(buf);
2668 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2669 l = 2;
2670 } else {
2671 /* 8 bit write access */
2672 val = ldub_p(buf);
2673 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2674 l = 1;
2676 } else {
2677 unsigned long addr1;
2678 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2679 /* RAM case */
2680 ptr = phys_ram_base + addr1;
2681 memcpy(ptr, buf, l);
2682 if (!cpu_physical_memory_is_dirty(addr1)) {
2683 /* invalidate code */
2684 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2685 /* set dirty bit */
2686 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2687 (0xff & ~CODE_DIRTY_FLAG);
2689 /* qemu doesn't execute guest code directly, but kvm does,
2690 therefore flush the instruction caches */
2691 if (kvm_enabled())
2692 flush_icache_range((unsigned long)ptr,
2693 ((unsigned long)ptr)+l);
2695 } else {
2696 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2697 !(pd & IO_MEM_ROMD)) {
2698 /* I/O case */
2699 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2700 if (l >= 4 && ((addr & 3) == 0)) {
2701 /* 32 bit read access */
2702 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2703 stl_p(buf, val);
2704 l = 4;
2705 } else if (l >= 2 && ((addr & 1) == 0)) {
2706 /* 16 bit read access */
2707 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2708 stw_p(buf, val);
2709 l = 2;
2710 } else {
2711 /* 8 bit read access */
2712 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2713 stb_p(buf, val);
2714 l = 1;
2716 } else {
2717 /* RAM case */
2718 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2719 (addr & ~TARGET_PAGE_MASK);
2720 memcpy(buf, ptr, l);
2723 len -= l;
2724 buf += l;
2725 addr += l;
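/* Illustrative sketch, not compiled: a device model doing DMA through the
   cpu_physical_memory_read()/write() wrappers around the function above.
   The buffer size and guest address are hypothetical. */
#if 0
static void example_dma(target_phys_addr_t guest_buf)
{
    uint8_t buf[512];

    cpu_physical_memory_read(guest_buf, buf, sizeof(buf));   /* guest -> host */
    /* ... transform buf in place ... */
    cpu_physical_memory_write(guest_buf, buf, sizeof(buf));  /* host -> guest */
}
#endif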
2729 /* used for ROM loading: can write in RAM and ROM */
2730 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2731 const uint8_t *buf, int len)
2733 int l;
2734 uint8_t *ptr;
2735 target_phys_addr_t page;
2736 unsigned long pd;
2737 PhysPageDesc *p;
2739 while (len > 0) {
2740 page = addr & TARGET_PAGE_MASK;
2741 l = (page + TARGET_PAGE_SIZE) - addr;
2742 if (l > len)
2743 l = len;
2744 p = phys_page_find(page >> TARGET_PAGE_BITS);
2745 if (!p) {
2746 pd = IO_MEM_UNASSIGNED;
2747 } else {
2748 pd = p->phys_offset;
2751 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2752 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2753 !(pd & IO_MEM_ROMD)) {
2754 /* do nothing */
2755 } else {
2756 unsigned long addr1;
2757 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2758 /* ROM/RAM case */
2759 ptr = phys_ram_base + addr1;
2760 memcpy(ptr, buf, l);
2762 len -= l;
2763 buf += l;
2764 addr += l;
2769 /* warning: addr must be aligned */
2770 uint32_t ldl_phys(target_phys_addr_t addr)
2772 int io_index;
2773 uint8_t *ptr;
2774 uint32_t val;
2775 unsigned long pd;
2776 PhysPageDesc *p;
2778 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2779 if (!p) {
2780 pd = IO_MEM_UNASSIGNED;
2781 } else {
2782 pd = p->phys_offset;
2785 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2786 !(pd & IO_MEM_ROMD)) {
2787 /* I/O case */
2788 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2789 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2790 } else {
2791 /* RAM case */
2792 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2793 (addr & ~TARGET_PAGE_MASK);
2794 val = ldl_p(ptr);
2796 return val;
2799 /* warning: addr must be aligned */
2800 uint64_t ldq_phys(target_phys_addr_t addr)
2802 int io_index;
2803 uint8_t *ptr;
2804 uint64_t val;
2805 unsigned long pd;
2806 PhysPageDesc *p;
2808 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2809 if (!p) {
2810 pd = IO_MEM_UNASSIGNED;
2811 } else {
2812 pd = p->phys_offset;
2815 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2816 !(pd & IO_MEM_ROMD)) {
2817 /* I/O case */
2818 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2819 #ifdef TARGET_WORDS_BIGENDIAN
2820 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2821 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2822 #else
2823 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2824 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2825 #endif
2826 } else {
2827 /* RAM case */
2828 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2829 (addr & ~TARGET_PAGE_MASK);
2830 val = ldq_p(ptr);
2832 return val;
2835 /* XXX: optimize */
2836 uint32_t ldub_phys(target_phys_addr_t addr)
2838 uint8_t val;
2839 cpu_physical_memory_read(addr, &val, 1);
2840 return val;
2843 /* XXX: optimize */
2844 uint32_t lduw_phys(target_phys_addr_t addr)
2846 uint16_t val;
2847 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2848 return tswap16(val);
2851 #ifdef __GNUC__
2852 #define likely(x) __builtin_expect(!!(x), 1)
2853 #define unlikely(x) __builtin_expect(!!(x), 0)
2854 #else
2855 #define likely(x) x
2856 #define unlikely(x) x
2857 #endif
2859 /* warning: addr must be aligned. The ram page is not marked as dirty
2860 and the code inside is not invalidated. It is useful if the dirty
2861 bits are used to track modified PTEs */
2862 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2864 int io_index;
2865 uint8_t *ptr;
2866 unsigned long pd;
2867 PhysPageDesc *p;
2869 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2870 if (!p) {
2871 pd = IO_MEM_UNASSIGNED;
2872 } else {
2873 pd = p->phys_offset;
2876 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2877 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2879 } else {
2880 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2881 ptr = phys_ram_base + addr1;
2882 stl_p(ptr, val);
2884 if (unlikely(in_migration)) {
2885 if (!cpu_physical_memory_is_dirty(addr1)) {
2886 /* invalidate code */
2887 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2888 /* set dirty bit */
2889 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2890 (0xff & ~CODE_DIRTY_FLAG);
2896 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2898 int io_index;
2899 uint8_t *ptr;
2900 unsigned long pd;
2901 PhysPageDesc *p;
2903 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2904 if (!p) {
2905 pd = IO_MEM_UNASSIGNED;
2906 } else {
2907 pd = p->phys_offset;
2910 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2911 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2912 #ifdef TARGET_WORDS_BIGENDIAN
2913 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2914 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2915 #else
2916 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2917 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2918 #endif
2919 } else {
2920 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2921 (addr & ~TARGET_PAGE_MASK);
2922 stq_p(ptr, val);
2926 /* warning: addr must be aligned */
2927 void stl_phys(target_phys_addr_t addr, uint32_t val)
2929 int io_index;
2930 uint8_t *ptr;
2931 unsigned long pd;
2932 PhysPageDesc *p;
2934 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2935 if (!p) {
2936 pd = IO_MEM_UNASSIGNED;
2937 } else {
2938 pd = p->phys_offset;
2941 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2942 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2943 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2944 } else {
2945 unsigned long addr1;
2946 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2947 /* RAM case */
2948 ptr = phys_ram_base + addr1;
2949 stl_p(ptr, val);
2950 if (!cpu_physical_memory_is_dirty(addr1)) {
2951 /* invalidate code */
2952 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2953 /* set dirty bit */
2954 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2955 (0xff & ~CODE_DIRTY_FLAG);
2960 /* XXX: optimize */
2961 void stb_phys(target_phys_addr_t addr, uint32_t val)
2963 uint8_t v = val;
2964 cpu_physical_memory_write(addr, &v, 1);
2967 /* XXX: optimize */
2968 void stw_phys(target_phys_addr_t addr, uint32_t val)
2970 uint16_t v = tswap16(val);
2971 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2974 /* XXX: optimize */
2975 void stq_phys(target_phys_addr_t addr, uint64_t val)
2977 val = tswap64(val);
2978 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
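/* Illustrative sketch, not compiled: reading a small guest-physical
   descriptor with the ld*_phys/st*_phys helpers above, much as the virtio
   code walks its rings.  The descriptor layout is hypothetical. */
#if 0
static void example_read_descriptor(target_phys_addr_t desc)
{
    uint64_t buf_addr = ldq_phys(desc);         /* bytes 0..7:   buffer gpa */
    uint32_t buf_len  = ldl_phys(desc + 8);     /* bytes 8..11:  length     */
    uint32_t flags    = lduw_phys(desc + 12);   /* bytes 12..13: flags      */

    /* write a completion status byte back into guest memory */
    stb_phys(desc + 14, 1);

    (void)buf_addr; (void)buf_len; (void)flags;
}
#endif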
2981 #endif
2983 /* virtual memory access for debug */
2984 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2985 uint8_t *buf, int len, int is_write)
2987 int l;
2988 target_phys_addr_t phys_addr;
2989 target_ulong page;
2991 while (len > 0) {
2992 page = addr & TARGET_PAGE_MASK;
2993 phys_addr = cpu_get_phys_page_debug(env, page);
2994 /* if no physical page mapped, return an error */
2995 if (phys_addr == -1)
2996 return -1;
2997 l = (page + TARGET_PAGE_SIZE) - addr;
2998 if (l > len)
2999 l = len;
3000 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3001 buf, l, is_write);
3002 len -= l;
3003 buf += l;
3004 addr += l;
3006 return 0;
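/* Illustrative sketch, not compiled: how a debugger front end (e.g. the gdb
   stub) could read a 32-bit value from a guest virtual address via the
   debug path above.  The helper name is hypothetical. */
#if 0
static uint32_t example_peek_u32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return 0;                       /* no physical page mapped */
    return ldl_p(buf);
}
#endif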
3009 void dump_exec_info(FILE *f,
3010 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3012 int i, target_code_size, max_target_code_size;
3013 int direct_jmp_count, direct_jmp2_count, cross_page;
3014 TranslationBlock *tb;
3016 target_code_size = 0;
3017 max_target_code_size = 0;
3018 cross_page = 0;
3019 direct_jmp_count = 0;
3020 direct_jmp2_count = 0;
3021 for(i = 0; i < nb_tbs; i++) {
3022 tb = &tbs[i];
3023 target_code_size += tb->size;
3024 if (tb->size > max_target_code_size)
3025 max_target_code_size = tb->size;
3026 if (tb->page_addr[1] != -1)
3027 cross_page++;
3028 if (tb->tb_next_offset[0] != 0xffff) {
3029 direct_jmp_count++;
3030 if (tb->tb_next_offset[1] != 0xffff) {
3031 direct_jmp2_count++;
3035 /* XXX: avoid using doubles ? */
3036 cpu_fprintf(f, "Translation buffer state:\n");
3037 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3038 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3039 nb_tbs ? target_code_size / nb_tbs : 0,
3040 max_target_code_size);
3041 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3042 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3043 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3044 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3045 cross_page,
3046 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3047 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3048 direct_jmp_count,
3049 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3050 direct_jmp2_count,
3051 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3052 cpu_fprintf(f, "\nStatistics:\n");
3053 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3054 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3055 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3056 #ifdef CONFIG_PROFILER
3058 int64_t tot;
3059 tot = dyngen_interm_time + dyngen_code_time;
3060 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3061 tot, tot / 2.4e9);
3062 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3063 dyngen_tb_count,
3064 dyngen_tb_count1 - dyngen_tb_count,
3065 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3066 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3067 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3068 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
3069 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3070 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3071 dyngen_tb_count ?
3072 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3073 cpu_fprintf(f, "cycles/op %0.1f\n",
3074 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3075 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3076 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3077 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3078 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3079 if (tot == 0)
3080 tot = 1;
3081 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3082 (double)dyngen_interm_time / tot * 100.0);
3083 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3084 (double)dyngen_code_time / tot * 100.0);
3085 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3086 dyngen_restore_count);
3087 cpu_fprintf(f, " avg cycles %0.1f\n",
3088 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3090 extern void dump_op_count(void);
3091 dump_op_count();
3094 #endif
3097 #if !defined(CONFIG_USER_ONLY)
3099 #define MMUSUFFIX _cmmu
3100 #define GETPC() NULL
3101 #define env cpu_single_env
3102 #define SOFTMMU_CODE_ACCESS
3104 #define SHIFT 0
3105 #include "softmmu_template.h"
3107 #define SHIFT 1
3108 #include "softmmu_template.h"
3110 #define SHIFT 2
3111 #include "softmmu_template.h"
3113 #define SHIFT 3
3114 #include "softmmu_template.h"
3116 #undef env
3118 #endif