[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
39 #if !defined(NO_CPU_EMULATION)
40 #include "tcg-target.h"
41 #endif
43 #include "qemu-kvm.h"
44 #if defined(CONFIG_USER_ONLY)
45 #include <qemu.h>
46 #endif
48 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_FLUSH
50 //#define DEBUG_TLB
51 //#define DEBUG_UNASSIGNED
53 /* make various TB consistency checks */
54 //#define DEBUG_TB_CHECK
55 //#define DEBUG_TLB_CHECK
57 //#define DEBUG_IOPORT
58 //#define DEBUG_SUBPAGE
60 #if !defined(CONFIG_USER_ONLY)
61 /* TB consistency checks only implemented for usermode emulation. */
62 #undef DEBUG_TB_CHECK
63 #endif
65 /* threshold to flush the translated code buffer */
66 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
68 #define SMC_BITMAP_USE_THRESHOLD 10
70 #define MMAP_AREA_START 0x00000000
71 #define MMAP_AREA_END 0xa8000000
73 #if defined(TARGET_SPARC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 41
75 #elif defined(TARGET_SPARC)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 36
77 #elif defined(TARGET_ALPHA)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #define TARGET_VIRT_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_PPC64)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 42
82 #elif USE_KQEMU
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #elif TARGET_X86_64
86 #define TARGET_PHYS_ADDR_SPACE_BITS 42
87 #elif defined(TARGET_IA64)
88 #define TARGET_PHYS_ADDR_SPACE_BITS 36
89 #else
90 #define TARGET_PHYS_ADDR_SPACE_BITS 32
91 #endif
93 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
94 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
95 int nb_tbs;
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
99 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
100 uint8_t *code_gen_ptr;
102 ram_addr_t phys_ram_size;
103 int phys_ram_fd;
104 uint8_t *phys_ram_base;
105 uint8_t *phys_ram_dirty;
106 uint8_t *bios_mem;
107 static int in_migration;
108 static ram_addr_t phys_ram_alloc_offset = 0;
110 CPUState *first_cpu;
111 /* current CPU in the current thread. It is only valid inside
112 cpu_exec() */
113 CPUState *cpu_single_env;
115 typedef struct PageDesc {
116 /* list of TBs intersecting this ram page */
117 TranslationBlock *first_tb;
118 /* in order to optimize self modifying code, we count the number
119 of write accesses to a given page before switching to a bitmap */
120 unsigned int code_write_count;
121 uint8_t *code_bitmap;
122 #if defined(CONFIG_USER_ONLY)
123 unsigned long flags;
124 #endif
125 } PageDesc;
127 typedef struct PhysPageDesc {
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 ram_addr_t phys_offset;
130 } PhysPageDesc;
132 #define L2_BITS 10
133 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
134 /* XXX: this is a temporary hack for alpha target.
135 * In the future, this is to be replaced by a multi-level table
136 * to actually be able to handle the complete 64 bits address space.
137 */
138 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
139 #else
140 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
141 #endif
143 #define L1_SIZE (1 << L1_BITS)
144 #define L2_SIZE (1 << L2_BITS)
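/* the page descriptor tables are two-level: the top L1_BITS of the target
   page number index l1_map[]/l1_phys_map[], and the low L2_BITS select the
   entry inside an L2 block that is allocated on demand.  Illustrative lookup
   (numbers assume a 32 bit target with 4 KB pages, i.e. TARGET_PAGE_BITS = 12
   and therefore L1_BITS = 10):

       index    = addr >> TARGET_PAGE_BITS;      (target page number)
       l1_index = index >> L2_BITS;              (top 10 bits)
       l2_index = index & (L2_SIZE - 1);         (low 10 bits)
       desc     = l1_map[l1_index] + l2_index;

   page_find()/page_find_alloc() below implement exactly this split;
   phys_page_find_alloc() adds one more level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32. */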
146 static void io_mem_init(void);
148 unsigned long qemu_real_host_page_size;
149 unsigned long qemu_host_page_bits;
150 unsigned long qemu_host_page_size;
151 unsigned long qemu_host_page_mask;
153 /* XXX: for system emulation, it could just be an array */
154 static PageDesc *l1_map[L1_SIZE];
155 PhysPageDesc **l1_phys_map;
157 /* io memory support */
158 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
159 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
160 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
161 static int io_mem_nb;
162 #if defined(CONFIG_SOFTMMU)
163 static int io_mem_watch;
164 #endif
166 /* log support */
167 char *logfilename = "/tmp/qemu.log";
168 FILE *logfile;
169 int loglevel;
170 static int log_append = 0;
172 /* statistics */
173 static int tlb_flush_count;
174 static int tb_flush_count;
175 static int tb_phys_invalidate_count;
177 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
178 typedef struct subpage_t {
179 target_phys_addr_t base;
180 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
181 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
182 void *opaque[TARGET_PAGE_SIZE][2][4];
183 } subpage_t;
185 static void page_init(void)
187 /* NOTE: we can always suppose that qemu_host_page_size >=
188 TARGET_PAGE_SIZE */
189 #ifdef _WIN32
191 SYSTEM_INFO system_info;
192 DWORD old_protect;
194 GetSystemInfo(&system_info);
195 qemu_real_host_page_size = system_info.dwPageSize;
197 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
198 PAGE_EXECUTE_READWRITE, &old_protect);
200 #else
201 qemu_real_host_page_size = getpagesize();
203 unsigned long start, end;
205 start = (unsigned long)code_gen_buffer;
206 start &= ~(qemu_real_host_page_size - 1);
208 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
209 end += qemu_real_host_page_size - 1;
210 end &= ~(qemu_real_host_page_size - 1);
212 mprotect((void *)start, end - start,
213 PROT_READ | PROT_WRITE | PROT_EXEC);
215 #endif
217 if (qemu_host_page_size == 0)
218 qemu_host_page_size = qemu_real_host_page_size;
219 if (qemu_host_page_size < TARGET_PAGE_SIZE)
220 qemu_host_page_size = TARGET_PAGE_SIZE;
221 qemu_host_page_bits = 0;
222 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
223 qemu_host_page_bits++;
224 qemu_host_page_mask = ~(qemu_host_page_size - 1);
225 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
226 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
228 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
230 long long startaddr, endaddr;
231 FILE *f;
232 int n;
234 f = fopen("/proc/self/maps", "r");
235 if (f) {
236 do {
237 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
238 if (n == 2) {
239 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
240 TARGET_PAGE_ALIGN(endaddr),
241 PAGE_RESERVED);
243 } while (!feof(f));
244 fclose(f);
247 #endif
250 static inline PageDesc *page_find_alloc(unsigned int index)
252 PageDesc **lp, *p;
254 lp = &l1_map[index >> L2_BITS];
255 p = *lp;
256 if (!p) {
257 /* allocate if not found */
258 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
259 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
260 *lp = p;
262 return p + (index & (L2_SIZE - 1));
265 static inline PageDesc *page_find(unsigned int index)
267 PageDesc *p;
269 p = l1_map[index >> L2_BITS];
270 if (!p)
271 return 0;
272 return p + (index & (L2_SIZE - 1));
275 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
277 void **lp, **p;
278 PhysPageDesc *pd;
280 p = (void **)l1_phys_map;
281 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
283 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
284 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
285 #endif
286 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
287 p = *lp;
288 if (!p) {
289 /* allocate if not found */
290 if (!alloc)
291 return NULL;
292 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
293 memset(p, 0, sizeof(void *) * L1_SIZE);
294 *lp = p;
296 #endif
297 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
298 pd = *lp;
299 if (!pd) {
300 int i;
301 /* allocate if not found */
302 if (!alloc)
303 return NULL;
304 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
305 *lp = pd;
306 for (i = 0; i < L2_SIZE; i++)
307 pd[i].phys_offset = IO_MEM_UNASSIGNED;
309 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
312 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
314 return phys_page_find_alloc(index, 0);
317 #if !defined(CONFIG_USER_ONLY)
318 static void tlb_protect_code(ram_addr_t ram_addr);
319 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
320 target_ulong vaddr);
321 #endif
323 void cpu_exec_init(CPUState *env)
325 CPUState **penv;
326 int cpu_index;
328 if (!code_gen_ptr) {
329 cpu_gen_init();
330 code_gen_ptr = code_gen_buffer;
331 page_init();
332 io_mem_init();
334 env->next_cpu = NULL;
335 penv = &first_cpu;
336 cpu_index = 0;
337 while (*penv != NULL) {
338 penv = (CPUState **)&(*penv)->next_cpu;
339 cpu_index++;
341 env->cpu_index = cpu_index;
342 env->nb_watchpoints = 0;
343 *penv = env;
346 static inline void invalidate_page_bitmap(PageDesc *p)
348 if (p->code_bitmap) {
349 qemu_free(p->code_bitmap);
350 p->code_bitmap = NULL;
352 p->code_write_count = 0;
355 /* set to NULL all the 'first_tb' fields in all PageDescs */
356 static void page_flush_tb(void)
358 int i, j;
359 PageDesc *p;
361 for(i = 0; i < L1_SIZE; i++) {
362 p = l1_map[i];
363 if (p) {
364 for(j = 0; j < L2_SIZE; j++) {
365 p->first_tb = NULL;
366 invalidate_page_bitmap(p);
367 p++;
373 /* flush all the translation blocks */
374 /* XXX: tb_flush is currently not thread safe */
375 void tb_flush(CPUState *env1)
377 CPUState *env;
378 #if defined(DEBUG_FLUSH)
379 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
380 (unsigned long)(code_gen_ptr - code_gen_buffer),
381 nb_tbs, nb_tbs > 0 ?
382 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
383 #endif
384 nb_tbs = 0;
386 for(env = first_cpu; env != NULL; env = env->next_cpu) {
387 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
390 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
391 page_flush_tb();
393 code_gen_ptr = code_gen_buffer;
394 /* XXX: flush processor icache at this point if cache flush is
395 expensive */
396 tb_flush_count++;
399 #ifdef DEBUG_TB_CHECK
401 static void tb_invalidate_check(target_ulong address)
403 TranslationBlock *tb;
404 int i;
405 address &= TARGET_PAGE_MASK;
406 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
407 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
408 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
409 address >= tb->pc + tb->size)) {
410 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
411 address, (long)tb->pc, tb->size);
417 /* verify that all the pages have correct rights for code */
418 static void tb_page_check(void)
420 TranslationBlock *tb;
421 int i, flags1, flags2;
423 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
424 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
425 flags1 = page_get_flags(tb->pc);
426 flags2 = page_get_flags(tb->pc + tb->size - 1);
427 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
428 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
429 (long)tb->pc, tb->size, flags1, flags2);
435 void tb_jmp_check(TranslationBlock *tb)
437 TranslationBlock *tb1;
438 unsigned int n1;
440 /* suppress any remaining jumps to this TB */
441 tb1 = tb->jmp_first;
442 for(;;) {
443 n1 = (long)tb1 & 3;
444 tb1 = (TranslationBlock *)((long)tb1 & ~3);
445 if (n1 == 2)
446 break;
447 tb1 = tb1->jmp_next[n1];
449 /* check end of list */
450 if (tb1 != tb) {
451 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
455 #endif
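/* note on the encoding used by the TB lists: the low two bits of a
   TranslationBlock pointer stored in page_next[], jmp_next[] or jmp_first
   carry which jump slot (0 or 1) of the pointing TB is meant, and the value
   2 marks the head/end of the circular list.  The idiom used throughout this
   file is therefore:

       n1  = (long)tb1 & 3;                          (slot index, or 2)
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   (real pointer)

   and an empty jump list is created with
       tb->jmp_first = (TranslationBlock *)((long)tb | 2);
*/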
457 /* invalidate one TB */
458 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
459 int next_offset)
461 TranslationBlock *tb1;
462 for(;;) {
463 tb1 = *ptb;
464 if (tb1 == tb) {
465 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
466 break;
468 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
472 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
474 TranslationBlock *tb1;
475 unsigned int n1;
477 for(;;) {
478 tb1 = *ptb;
479 n1 = (long)tb1 & 3;
480 tb1 = (TranslationBlock *)((long)tb1 & ~3);
481 if (tb1 == tb) {
482 *ptb = tb1->page_next[n1];
483 break;
485 ptb = &tb1->page_next[n1];
489 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
491 TranslationBlock *tb1, **ptb;
492 unsigned int n1;
494 ptb = &tb->jmp_next[n];
495 tb1 = *ptb;
496 if (tb1) {
497 /* find tb(n) in circular list */
498 for(;;) {
499 tb1 = *ptb;
500 n1 = (long)tb1 & 3;
501 tb1 = (TranslationBlock *)((long)tb1 & ~3);
502 if (n1 == n && tb1 == tb)
503 break;
504 if (n1 == 2) {
505 ptb = &tb1->jmp_first;
506 } else {
507 ptb = &tb1->jmp_next[n1];
510 /* now we can suppress tb(n) from the list */
511 *ptb = tb->jmp_next[n];
513 tb->jmp_next[n] = NULL;
517 /* reset the jump entry 'n' of a TB so that it is not chained to
518 another TB */
519 static inline void tb_reset_jump(TranslationBlock *tb, int n)
521 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
524 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
526 CPUState *env;
527 PageDesc *p;
528 unsigned int h, n1;
529 target_ulong phys_pc;
530 TranslationBlock *tb1, *tb2;
532 /* remove the TB from the hash list */
533 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
534 h = tb_phys_hash_func(phys_pc);
535 tb_remove(&tb_phys_hash[h], tb,
536 offsetof(TranslationBlock, phys_hash_next));
538 /* remove the TB from the page list */
539 if (tb->page_addr[0] != page_addr) {
540 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
541 tb_page_remove(&p->first_tb, tb);
542 invalidate_page_bitmap(p);
544 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
545 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
546 tb_page_remove(&p->first_tb, tb);
547 invalidate_page_bitmap(p);
550 tb_invalidated_flag = 1;
552 /* remove the TB from the hash list */
553 h = tb_jmp_cache_hash_func(tb->pc);
554 for(env = first_cpu; env != NULL; env = env->next_cpu) {
555 if (env->tb_jmp_cache[h] == tb)
556 env->tb_jmp_cache[h] = NULL;
559 /* suppress this TB from the two jump lists */
560 tb_jmp_remove(tb, 0);
561 tb_jmp_remove(tb, 1);
563 /* suppress any remaining jumps to this TB */
564 tb1 = tb->jmp_first;
565 for(;;) {
566 n1 = (long)tb1 & 3;
567 if (n1 == 2)
568 break;
569 tb1 = (TranslationBlock *)((long)tb1 & ~3);
570 tb2 = tb1->jmp_next[n1];
571 tb_reset_jump(tb1, n1);
572 tb1->jmp_next[n1] = NULL;
573 tb1 = tb2;
575 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
577 tb_phys_invalidate_count++;
580 static inline void set_bits(uint8_t *tab, int start, int len)
582 int end, mask, end1;
584 end = start + len;
585 tab += start >> 3;
586 mask = 0xff << (start & 7);
587 if ((start & ~7) == (end & ~7)) {
588 if (start < end) {
589 mask &= ~(0xff << (end & 7));
590 *tab |= mask;
592 } else {
593 *tab++ |= mask;
594 start = (start + 8) & ~7;
595 end1 = end & ~7;
596 while (start < end1) {
597 *tab++ = 0xff;
598 start += 8;
600 if (start < end) {
601 mask = ~(0xff << (end & 7));
602 *tab |= mask;
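/* set_bits() sets the bit range [start, start + len) in a byte-array bitmap
   where bit i is bit (i & 7) of tab[i >> 3].  For instance (illustrative),
   set_bits(bitmap, 5, 10) sets bits 5..7 of bitmap[0] and bits 0..6 of
   bitmap[1].  build_page_bitmap() below uses it with one bit per byte of
   guest code, so a full page needs TARGET_PAGE_SIZE / 8 bytes of bitmap. */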
607 static void build_page_bitmap(PageDesc *p)
609 int n, tb_start, tb_end;
610 TranslationBlock *tb;
612 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
613 if (!p->code_bitmap)
614 return;
615 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
617 tb = p->first_tb;
618 while (tb != NULL) {
619 n = (long)tb & 3;
620 tb = (TranslationBlock *)((long)tb & ~3);
621 /* NOTE: this is subtle as a TB may span two physical pages */
622 if (n == 0) {
623 /* NOTE: tb_end may be after the end of the page, but
624 it is not a problem */
625 tb_start = tb->pc & ~TARGET_PAGE_MASK;
626 tb_end = tb_start + tb->size;
627 if (tb_end > TARGET_PAGE_SIZE)
628 tb_end = TARGET_PAGE_SIZE;
629 } else {
630 tb_start = 0;
631 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
633 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
634 tb = tb->page_next[n];
638 #ifdef TARGET_HAS_PRECISE_SMC
640 static void tb_gen_code(CPUState *env,
641 target_ulong pc, target_ulong cs_base, int flags,
642 int cflags)
644 TranslationBlock *tb;
645 uint8_t *tc_ptr;
646 target_ulong phys_pc, phys_page2, virt_page2;
647 int code_gen_size;
649 phys_pc = get_phys_addr_code(env, pc);
650 tb = tb_alloc(pc);
651 if (!tb) {
652 /* flush must be done */
653 tb_flush(env);
654 /* cannot fail at this point */
655 tb = tb_alloc(pc);
657 tc_ptr = code_gen_ptr;
658 tb->tc_ptr = tc_ptr;
659 tb->cs_base = cs_base;
660 tb->flags = flags;
661 tb->cflags = cflags;
662 cpu_gen_code(env, tb, &code_gen_size);
663 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
665 /* check next page if needed */
666 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
667 phys_page2 = -1;
668 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
669 phys_page2 = get_phys_addr_code(env, virt_page2);
671 tb_link_phys(tb, phys_pc, phys_page2);
673 #endif
675 /* invalidate all TBs which intersect with the target physical page
676 starting in range [start;end[. NOTE: start and end must refer to
677 the same physical page. 'is_cpu_write_access' should be true if called
678 from a real cpu write access: the virtual CPU will exit the current
679 TB if code is modified inside this TB. */
680 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
681 int is_cpu_write_access)
683 int n, current_tb_modified, current_tb_not_found, current_flags;
684 CPUState *env = cpu_single_env;
685 PageDesc *p;
686 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
687 target_ulong tb_start, tb_end;
688 target_ulong current_pc, current_cs_base;
690 p = page_find(start >> TARGET_PAGE_BITS);
691 if (!p)
692 return;
693 if (!p->code_bitmap &&
694 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
695 is_cpu_write_access) {
696 /* build code bitmap */
697 build_page_bitmap(p);
700 /* we remove all the TBs in the range [start, end[ */
701 /* XXX: see if in some cases it could be faster to invalidate all the code */
702 current_tb_not_found = is_cpu_write_access;
703 current_tb_modified = 0;
704 current_tb = NULL; /* avoid warning */
705 current_pc = 0; /* avoid warning */
706 current_cs_base = 0; /* avoid warning */
707 current_flags = 0; /* avoid warning */
708 tb = p->first_tb;
709 while (tb != NULL) {
710 n = (long)tb & 3;
711 tb = (TranslationBlock *)((long)tb & ~3);
712 tb_next = tb->page_next[n];
713 /* NOTE: this is subtle as a TB may span two physical pages */
714 if (n == 0) {
715 /* NOTE: tb_end may be after the end of the page, but
716 it is not a problem */
717 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
718 tb_end = tb_start + tb->size;
719 } else {
720 tb_start = tb->page_addr[1];
721 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
723 if (!(tb_end <= start || tb_start >= end)) {
724 #ifdef TARGET_HAS_PRECISE_SMC
725 if (current_tb_not_found) {
726 current_tb_not_found = 0;
727 current_tb = NULL;
728 if (env->mem_write_pc) {
729 /* now we have a real cpu fault */
730 current_tb = tb_find_pc(env->mem_write_pc);
733 if (current_tb == tb &&
734 !(current_tb->cflags & CF_SINGLE_INSN)) {
735 /* If we are modifying the current TB, we must stop
736 its execution. We could be more precise by checking
737 that the modification is after the current PC, but it
738 would require a specialized function to partially
739 restore the CPU state */
741 current_tb_modified = 1;
742 cpu_restore_state(current_tb, env,
743 env->mem_write_pc, NULL);
744 #if defined(TARGET_I386)
745 current_flags = env->hflags;
746 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
747 current_cs_base = (target_ulong)env->segs[R_CS].base;
748 current_pc = current_cs_base + env->eip;
749 #else
750 #error unsupported CPU
751 #endif
753 #endif /* TARGET_HAS_PRECISE_SMC */
754 /* we need to do that to handle the case where a signal
755 occurs while doing tb_phys_invalidate() */
756 saved_tb = NULL;
757 if (env) {
758 saved_tb = env->current_tb;
759 env->current_tb = NULL;
761 tb_phys_invalidate(tb, -1);
762 if (env) {
763 env->current_tb = saved_tb;
764 if (env->interrupt_request && env->current_tb)
765 cpu_interrupt(env, env->interrupt_request);
768 tb = tb_next;
770 #if !defined(CONFIG_USER_ONLY)
771 /* if no code remaining, no need to continue to use slow writes */
772 if (!p->first_tb) {
773 invalidate_page_bitmap(p);
774 if (is_cpu_write_access) {
775 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
778 #endif
779 #ifdef TARGET_HAS_PRECISE_SMC
780 if (current_tb_modified) {
781 /* we generate a block containing just the instruction
782 modifying the memory. This ensures that the block cannot modify
783 itself */
784 env->current_tb = NULL;
785 tb_gen_code(env, current_pc, current_cs_base, current_flags,
786 CF_SINGLE_INSN);
787 cpu_resume_from_signal(env, NULL);
789 #endif
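/* illustrative call site: a device or DMA write of 'len' bytes at physical
   address 'pa' that might overlap translated code is followed by

       tb_invalidate_phys_page_range(pa, pa + len, 0);

   (is_cpu_write_access = 0 because the store does not come from the TB that
   is currently executing).  As noted above, callers must split the range so
   that start and end stay within the same physical page. */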
792 /* len must be <= 8 and start must be a multiple of len */
793 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
795 PageDesc *p;
796 int offset, b;
797 #if 0
798 if (1) {
799 if (loglevel) {
800 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
801 cpu_single_env->mem_write_vaddr, len,
802 cpu_single_env->eip,
803 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
806 #endif
807 p = page_find(start >> TARGET_PAGE_BITS);
808 if (!p)
809 return;
810 if (p->code_bitmap) {
811 offset = start & ~TARGET_PAGE_MASK;
812 b = p->code_bitmap[offset >> 3] >> (offset & 7);
813 if (b & ((1 << len) - 1))
814 goto do_invalidate;
815 } else {
816 do_invalidate:
817 tb_invalidate_phys_page_range(start, start + len, 1);
821 #if !defined(CONFIG_SOFTMMU)
822 static void tb_invalidate_phys_page(target_ulong addr,
823 unsigned long pc, void *puc)
825 int n, current_flags, current_tb_modified;
826 target_ulong current_pc, current_cs_base;
827 PageDesc *p;
828 TranslationBlock *tb, *current_tb;
829 #ifdef TARGET_HAS_PRECISE_SMC
830 CPUState *env = cpu_single_env;
831 #endif
833 addr &= TARGET_PAGE_MASK;
834 p = page_find(addr >> TARGET_PAGE_BITS);
835 if (!p)
836 return;
837 tb = p->first_tb;
838 current_tb_modified = 0;
839 current_tb = NULL;
840 current_pc = 0; /* avoid warning */
841 current_cs_base = 0; /* avoid warning */
842 current_flags = 0; /* avoid warning */
843 #ifdef TARGET_HAS_PRECISE_SMC
844 if (tb && pc != 0) {
845 current_tb = tb_find_pc(pc);
847 #endif
848 while (tb != NULL) {
849 n = (long)tb & 3;
850 tb = (TranslationBlock *)((long)tb & ~3);
851 #ifdef TARGET_HAS_PRECISE_SMC
852 if (current_tb == tb &&
853 !(current_tb->cflags & CF_SINGLE_INSN)) {
854 /* If we are modifying the current TB, we must stop
855 its execution. We could be more precise by checking
856 that the modification is after the current PC, but it
857 would require a specialized function to partially
858 restore the CPU state */
860 current_tb_modified = 1;
861 cpu_restore_state(current_tb, env, pc, puc);
862 #if defined(TARGET_I386)
863 current_flags = env->hflags;
864 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
865 current_cs_base = (target_ulong)env->segs[R_CS].base;
866 current_pc = current_cs_base + env->eip;
867 #else
868 #error unsupported CPU
869 #endif
871 #endif /* TARGET_HAS_PRECISE_SMC */
872 tb_phys_invalidate(tb, addr);
873 tb = tb->page_next[n];
875 p->first_tb = NULL;
876 #ifdef TARGET_HAS_PRECISE_SMC
877 if (current_tb_modified) {
878 /* we generate a block containing just the instruction
879 modifying the memory. This ensures that the block cannot modify
880 itself */
881 env->current_tb = NULL;
882 tb_gen_code(env, current_pc, current_cs_base, current_flags,
883 CF_SINGLE_INSN);
884 cpu_resume_from_signal(env, puc);
886 #endif
888 #endif
890 /* add the tb in the target page and protect it if necessary */
891 static inline void tb_alloc_page(TranslationBlock *tb,
892 unsigned int n, target_ulong page_addr)
894 PageDesc *p;
895 TranslationBlock *last_first_tb;
897 tb->page_addr[n] = page_addr;
898 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
899 tb->page_next[n] = p->first_tb;
900 last_first_tb = p->first_tb;
901 p->first_tb = (TranslationBlock *)((long)tb | n);
902 invalidate_page_bitmap(p);
904 #if defined(TARGET_HAS_SMC) || 1
906 #if defined(CONFIG_USER_ONLY)
907 if (p->flags & PAGE_WRITE) {
908 target_ulong addr;
909 PageDesc *p2;
910 int prot;
912 /* force the host page as non writable (writes will have a
913 page fault + mprotect overhead) */
914 page_addr &= qemu_host_page_mask;
915 prot = 0;
916 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
917 addr += TARGET_PAGE_SIZE) {
919 p2 = page_find (addr >> TARGET_PAGE_BITS);
920 if (!p2)
921 continue;
922 prot |= p2->flags;
923 p2->flags &= ~PAGE_WRITE;
924 page_get_flags(addr);
926 mprotect(g2h(page_addr), qemu_host_page_size,
927 (prot & PAGE_BITS) & ~PAGE_WRITE);
928 #ifdef DEBUG_TB_INVALIDATE
929 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
930 page_addr);
931 #endif
933 #else
934 /* if some code is already present, then the pages are already
935 protected. So we handle the case where only the first TB is
936 allocated in a physical page */
937 if (!last_first_tb) {
938 tlb_protect_code(page_addr);
940 #endif
942 #endif /* TARGET_HAS_SMC */
945 /* Allocate a new translation block. Flush the translation buffer if
946 too many translation blocks or too much generated code. */
947 TranslationBlock *tb_alloc(target_ulong pc)
949 TranslationBlock *tb;
951 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
952 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
953 return NULL;
954 tb = &tbs[nb_tbs++];
955 tb->pc = pc;
956 tb->cflags = 0;
957 return tb;
960 /* add a new TB and link it to the physical page tables. phys_page2 is
961 (-1) to indicate that only one page contains the TB. */
962 void tb_link_phys(TranslationBlock *tb,
963 target_ulong phys_pc, target_ulong phys_page2)
965 unsigned int h;
966 TranslationBlock **ptb;
968 /* add in the physical hash table */
969 h = tb_phys_hash_func(phys_pc);
970 ptb = &tb_phys_hash[h];
971 tb->phys_hash_next = *ptb;
972 *ptb = tb;
974 /* add in the page list */
975 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
976 if (phys_page2 != -1)
977 tb_alloc_page(tb, 1, phys_page2);
978 else
979 tb->page_addr[1] = -1;
981 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
982 tb->jmp_next[0] = NULL;
983 tb->jmp_next[1] = NULL;
985 /* init original jump addresses */
986 if (tb->tb_next_offset[0] != 0xffff)
987 tb_reset_jump(tb, 0);
988 if (tb->tb_next_offset[1] != 0xffff)
989 tb_reset_jump(tb, 1);
991 #ifdef DEBUG_TB_CHECK
992 tb_page_check();
993 #endif
996 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
997 tb[1].tc_ptr. Return NULL if not found */
998 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1000 int m_min, m_max, m;
1001 unsigned long v;
1002 TranslationBlock *tb;
1004 if (nb_tbs <= 0)
1005 return NULL;
1006 if (tc_ptr < (unsigned long)code_gen_buffer ||
1007 tc_ptr >= (unsigned long)code_gen_ptr)
1008 return NULL;
1009 /* binary search (cf Knuth) */
1010 m_min = 0;
1011 m_max = nb_tbs - 1;
1012 while (m_min <= m_max) {
1013 m = (m_min + m_max) >> 1;
1014 tb = &tbs[m];
1015 v = (unsigned long)tb->tc_ptr;
1016 if (v == tc_ptr)
1017 return tb;
1018 else if (tc_ptr < v) {
1019 m_max = m - 1;
1020 } else {
1021 m_min = m + 1;
1024 return &tbs[m_max];
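/* illustrative use: when a fault or signal is taken inside generated code,
   the host PC from the signal context is mapped back to its TB so that the
   guest CPU state can be rebuilt, roughly:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc, puc);

   the same pattern the precise-SMC code above uses with env->mem_write_pc. */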
1027 static void tb_reset_jump_recursive(TranslationBlock *tb);
1029 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1031 TranslationBlock *tb1, *tb_next, **ptb;
1032 unsigned int n1;
1034 tb1 = tb->jmp_next[n];
1035 if (tb1 != NULL) {
1036 /* find head of list */
1037 for(;;) {
1038 n1 = (long)tb1 & 3;
1039 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1040 if (n1 == 2)
1041 break;
1042 tb1 = tb1->jmp_next[n1];
1044 /* we are now sure that tb jumps to tb1 */
1045 tb_next = tb1;
1047 /* remove tb from the jmp_first list */
1048 ptb = &tb_next->jmp_first;
1049 for(;;) {
1050 tb1 = *ptb;
1051 n1 = (long)tb1 & 3;
1052 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1053 if (n1 == n && tb1 == tb)
1054 break;
1055 ptb = &tb1->jmp_next[n1];
1057 *ptb = tb->jmp_next[n];
1058 tb->jmp_next[n] = NULL;
1060 /* suppress the jump to next tb in generated code */
1061 tb_reset_jump(tb, n);
1063 /* suppress jumps in the tb on which we could have jumped */
1064 tb_reset_jump_recursive(tb_next);
1068 static void tb_reset_jump_recursive(TranslationBlock *tb)
1070 tb_reset_jump_recursive2(tb, 0);
1071 tb_reset_jump_recursive2(tb, 1);
1074 #if defined(TARGET_HAS_ICE)
1075 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1077 target_phys_addr_t addr;
1078 target_ulong pd;
1079 ram_addr_t ram_addr;
1080 PhysPageDesc *p;
1082 addr = cpu_get_phys_page_debug(env, pc);
1083 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1084 if (!p) {
1085 pd = IO_MEM_UNASSIGNED;
1086 } else {
1087 pd = p->phys_offset;
1089 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1090 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1092 #endif
1094 /* Add a watchpoint. */
1095 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1097 int i;
1099 for (i = 0; i < env->nb_watchpoints; i++) {
1100 if (addr == env->watchpoint[i].vaddr)
1101 return 0;
1103 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1104 return -1;
1106 i = env->nb_watchpoints++;
1107 env->watchpoint[i].vaddr = addr;
1108 tlb_flush_page(env, addr);
1109 /* FIXME: This flush is needed because of the hack to make memory ops
1110 terminate the TB. It can be removed once the proper IO trap and
1111 re-execute bits are in. */
1112 tb_flush(env);
1113 return i;
1116 /* Remove a watchpoint. */
1117 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1119 int i;
1121 for (i = 0; i < env->nb_watchpoints; i++) {
1122 if (addr == env->watchpoint[i].vaddr) {
1123 env->nb_watchpoints--;
1124 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1125 tlb_flush_page(env, addr);
1126 return 0;
1129 return -1;
1132 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1133 breakpoint is reached */
1134 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1136 #if defined(TARGET_HAS_ICE)
1137 int i;
1139 for(i = 0; i < env->nb_breakpoints; i++) {
1140 if (env->breakpoints[i] == pc)
1141 return 0;
1144 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1145 return -1;
1146 env->breakpoints[env->nb_breakpoints++] = pc;
1148 if (kvm_enabled())
1149 kvm_update_debugger(env);
1151 breakpoint_invalidate(env, pc);
1152 return 0;
1153 #else
1154 return -1;
1155 #endif
1158 /* remove a breakpoint */
1159 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1161 #if defined(TARGET_HAS_ICE)
1162 int i;
1163 for(i = 0; i < env->nb_breakpoints; i++) {
1164 if (env->breakpoints[i] == pc)
1165 goto found;
1167 return -1;
1168 found:
1169 env->nb_breakpoints--;
1170 if (i < env->nb_breakpoints)
1171 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1173 if (kvm_enabled())
1174 kvm_update_debugger(env);
1176 breakpoint_invalidate(env, pc);
1177 return 0;
1178 #else
1179 return -1;
1180 #endif
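/* illustrative caller (e.g. a debugger stub): setting and later clearing a
   breakpoint at guest address 'addr' is simply

       if (cpu_breakpoint_insert(env, addr) < 0)
           ... (MAX_BREAKPOINTS reached)
       ...
       cpu_breakpoint_remove(env, addr);

   the CPU loop then returns EXCP_DEBUG when the breakpoint address is hit. */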
1183 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1184 CPU loop after each instruction */
1185 void cpu_single_step(CPUState *env, int enabled)
1187 #if defined(TARGET_HAS_ICE)
1188 if (env->singlestep_enabled != enabled) {
1189 env->singlestep_enabled = enabled;
1190 /* must flush all the translated code to avoid inconsistencies */
1191 /* XXX: only flush what is necessary */
1192 tb_flush(env);
1194 if (kvm_enabled())
1195 kvm_update_debugger(env);
1196 #endif
1199 /* enable or disable low levels log */
1200 void cpu_set_log(int log_flags)
1202 loglevel = log_flags;
1203 if (loglevel && !logfile) {
1204 logfile = fopen(logfilename, log_append ? "a" : "w");
1205 if (!logfile) {
1206 perror(logfilename);
1207 _exit(1);
1209 #if !defined(CONFIG_SOFTMMU)
1210 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1212 static uint8_t logfile_buf[4096];
1213 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1215 #else
1216 setvbuf(logfile, NULL, _IOLBF, 0);
1217 #endif
1218 log_append = 1;
1220 if (!loglevel && logfile) {
1221 fclose(logfile);
1222 logfile = NULL;
1226 void cpu_set_log_filename(const char *filename)
1228 logfilename = strdup(filename);
1229 if (logfile) {
1230 fclose(logfile);
1231 logfile = NULL;
1233 cpu_set_log(loglevel);
1236 /* mask must never be zero, except for A20 change call */
1237 void cpu_interrupt(CPUState *env, int mask)
1239 TranslationBlock *tb;
1240 static int interrupt_lock;
1242 env->interrupt_request |= mask;
1243 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1244 kvm_update_interrupt_request(env);
1246 /* if the cpu is currently executing code, we must unlink it and
1247 all the potentially executing TBs */
1248 tb = env->current_tb;
1249 if (tb && !testandset(&interrupt_lock)) {
1250 env->current_tb = NULL;
1251 tb_reset_jump_recursive(tb);
1252 interrupt_lock = 0;
1256 void cpu_reset_interrupt(CPUState *env, int mask)
1258 env->interrupt_request &= ~mask;
1261 CPULogItem cpu_log_items[] = {
1262 { CPU_LOG_TB_OUT_ASM, "out_asm",
1263 "show generated host assembly code for each compiled TB" },
1264 { CPU_LOG_TB_IN_ASM, "in_asm",
1265 "show target assembly code for each compiled TB" },
1266 { CPU_LOG_TB_OP, "op",
1267 "show micro ops for each compiled TB" },
1268 #ifdef TARGET_I386
1269 { CPU_LOG_TB_OP_OPT, "op_opt",
1270 "show micro ops before eflags optimization" },
1271 #endif
1272 { CPU_LOG_INT, "int",
1273 "show interrupts/exceptions in short format" },
1274 { CPU_LOG_EXEC, "exec",
1275 "show trace before each executed TB (lots of logs)" },
1276 { CPU_LOG_TB_CPU, "cpu",
1277 "show CPU state before block translation" },
1278 #ifdef TARGET_I386
1279 { CPU_LOG_PCALL, "pcall",
1280 "show protected mode far calls/returns/exceptions" },
1281 #endif
1282 #ifdef DEBUG_IOPORT
1283 { CPU_LOG_IOPORT, "ioport",
1284 "show all i/o ports accesses" },
1285 #endif
1286 { 0, NULL, NULL },
1289 static int cmp1(const char *s1, int n, const char *s2)
1291 if (strlen(s2) != n)
1292 return 0;
1293 return memcmp(s1, s2, n) == 0;
1296 /* takes a comma separated list of log masks. Return 0 if error. */
1297 int cpu_str_to_log_mask(const char *str)
1299 CPULogItem *item;
1300 int mask;
1301 const char *p, *p1;
1303 p = str;
1304 mask = 0;
1305 for(;;) {
1306 p1 = strchr(p, ',');
1307 if (!p1)
1308 p1 = p + strlen(p);
1309 if(cmp1(p,p1-p,"all")) {
1310 for(item = cpu_log_items; item->mask != 0; item++) {
1311 mask |= item->mask;
1313 } else {
1314 for(item = cpu_log_items; item->mask != 0; item++) {
1315 if (cmp1(p, p1 - p, item->name))
1316 goto found;
1318 return 0;
1320 found:
1321 mask |= item->mask;
1322 if (*p1 != ',')
1323 break;
1324 p = p1 + 1;
1326 return mask;
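/* illustrative use (roughly what the "-d" command line option does):

       int mask = cpu_str_to_log_mask("in_asm,op,int");
       if (mask)
           cpu_set_log(mask);

   an unknown name in the list makes cpu_str_to_log_mask() return 0. */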
1329 void cpu_abort(CPUState *env, const char *fmt, ...)
1331 va_list ap;
1332 va_list ap2;
1334 va_start(ap, fmt);
1335 va_copy(ap2, ap);
1336 fprintf(stderr, "qemu: fatal: ");
1337 vfprintf(stderr, fmt, ap);
1338 fprintf(stderr, "\n");
1339 #ifdef TARGET_I386
1340 if(env->intercept & INTERCEPT_SVM_MASK) {
1341 /* most probably the virtual machine should not
1342 be shut down but rather caught by the VMM */
1343 vmexit(SVM_EXIT_SHUTDOWN, 0);
1345 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1346 #else
1347 cpu_dump_state(env, stderr, fprintf, 0);
1348 #endif
1349 if (logfile) {
1350 fprintf(logfile, "qemu: fatal: ");
1351 vfprintf(logfile, fmt, ap2);
1352 fprintf(logfile, "\n");
1353 #ifdef TARGET_I386
1354 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1355 #else
1356 cpu_dump_state(env, logfile, fprintf, 0);
1357 #endif
1358 fflush(logfile);
1359 fclose(logfile);
1361 va_end(ap2);
1362 va_end(ap);
1363 abort();
1366 CPUState *cpu_copy(CPUState *env)
1368 CPUState *new_env = cpu_init(env->cpu_model_str);
1369 /* preserve chaining and index */
1370 CPUState *next_cpu = new_env->next_cpu;
1371 int cpu_index = new_env->cpu_index;
1372 memcpy(new_env, env, sizeof(CPUState));
1373 new_env->next_cpu = next_cpu;
1374 new_env->cpu_index = cpu_index;
1375 return new_env;
1378 #if !defined(CONFIG_USER_ONLY)
1380 /* NOTE: if flush_global is true, also flush global entries (not
1381 implemented yet) */
1382 void tlb_flush(CPUState *env, int flush_global)
1384 int i;
1386 #if defined(DEBUG_TLB)
1387 printf("tlb_flush:\n");
1388 #endif
1389 /* must reset current TB so that interrupts cannot modify the
1390 links while we are modifying them */
1391 env->current_tb = NULL;
1393 for(i = 0; i < CPU_TLB_SIZE; i++) {
1394 env->tlb_table[0][i].addr_read = -1;
1395 env->tlb_table[0][i].addr_write = -1;
1396 env->tlb_table[0][i].addr_code = -1;
1397 env->tlb_table[1][i].addr_read = -1;
1398 env->tlb_table[1][i].addr_write = -1;
1399 env->tlb_table[1][i].addr_code = -1;
1400 #if (NB_MMU_MODES >= 3)
1401 env->tlb_table[2][i].addr_read = -1;
1402 env->tlb_table[2][i].addr_write = -1;
1403 env->tlb_table[2][i].addr_code = -1;
1404 #if (NB_MMU_MODES == 4)
1405 env->tlb_table[3][i].addr_read = -1;
1406 env->tlb_table[3][i].addr_write = -1;
1407 env->tlb_table[3][i].addr_code = -1;
1408 #endif
1409 #endif
1412 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1414 #if !defined(CONFIG_SOFTMMU)
1415 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1416 #endif
1417 #ifdef USE_KQEMU
1418 if (env->kqemu_enabled) {
1419 kqemu_flush(env, flush_global);
1421 #endif
1422 tlb_flush_count++;
1425 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1427 if (addr == (tlb_entry->addr_read &
1428 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1429 addr == (tlb_entry->addr_write &
1430 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1431 addr == (tlb_entry->addr_code &
1432 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1433 tlb_entry->addr_read = -1;
1434 tlb_entry->addr_write = -1;
1435 tlb_entry->addr_code = -1;
1439 void tlb_flush_page(CPUState *env, target_ulong addr)
1441 int i;
1442 TranslationBlock *tb;
1444 #if defined(DEBUG_TLB)
1445 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1446 #endif
1447 /* must reset current TB so that interrupts cannot modify the
1448 links while we are modifying them */
1449 env->current_tb = NULL;
1451 addr &= TARGET_PAGE_MASK;
1452 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1453 tlb_flush_entry(&env->tlb_table[0][i], addr);
1454 tlb_flush_entry(&env->tlb_table[1][i], addr);
1455 #if (NB_MMU_MODES >= 3)
1456 tlb_flush_entry(&env->tlb_table[2][i], addr);
1457 #if (NB_MMU_MODES == 4)
1458 tlb_flush_entry(&env->tlb_table[3][i], addr);
1459 #endif
1460 #endif
1462 /* Discard jump cache entries for any tb which might potentially
1463 overlap the flushed page. */
1464 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1465 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1467 i = tb_jmp_cache_hash_page(addr);
1468 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1470 #if !defined(CONFIG_SOFTMMU)
1471 if (addr < MMAP_AREA_END)
1472 munmap((void *)addr, TARGET_PAGE_SIZE);
1473 #endif
1474 #ifdef USE_KQEMU
1475 if (env->kqemu_enabled) {
1476 kqemu_flush_page(env, addr);
1478 #endif
1481 /* update the TLBs so that writes to code in the virtual page 'addr'
1482 can be detected */
1483 static void tlb_protect_code(ram_addr_t ram_addr)
1485 cpu_physical_memory_reset_dirty(ram_addr,
1486 ram_addr + TARGET_PAGE_SIZE,
1487 CODE_DIRTY_FLAG);
1490 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1491 tested for self modifying code */
1492 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1493 target_ulong vaddr)
1495 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1498 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1499 unsigned long start, unsigned long length)
1501 unsigned long addr;
1502 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1503 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1504 if ((addr - start) < length) {
1505 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1510 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1511 int dirty_flags)
1513 CPUState *env;
1514 unsigned long length, start1;
1515 int i, mask, len;
1516 uint8_t *p;
1518 start &= TARGET_PAGE_MASK;
1519 end = TARGET_PAGE_ALIGN(end);
1521 length = end - start;
1522 if (length == 0)
1523 return;
1524 len = length >> TARGET_PAGE_BITS;
1525 #ifdef USE_KQEMU
1526 /* XXX: should not depend on cpu context */
1527 env = first_cpu;
1528 if (env->kqemu_enabled) {
1529 ram_addr_t addr;
1530 addr = start;
1531 for(i = 0; i < len; i++) {
1532 kqemu_set_notdirty(env, addr);
1533 addr += TARGET_PAGE_SIZE;
1536 #endif
1537 mask = ~dirty_flags;
1538 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1539 for(i = 0; i < len; i++)
1540 p[i] &= mask;
1542 /* we modify the TLB cache so that the dirty bit will be set again
1543 when accessing the range */
1544 start1 = start + (unsigned long)phys_ram_base;
1545 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1546 for(i = 0; i < CPU_TLB_SIZE; i++)
1547 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1548 for(i = 0; i < CPU_TLB_SIZE; i++)
1549 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1550 #if (NB_MMU_MODES >= 3)
1551 for(i = 0; i < CPU_TLB_SIZE; i++)
1552 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1553 #if (NB_MMU_MODES == 4)
1554 for(i = 0; i < CPU_TLB_SIZE; i++)
1555 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1556 #endif
1557 #endif
1560 #if !defined(CONFIG_SOFTMMU)
1561 /* XXX: this is expensive */
1563 VirtPageDesc *p;
1564 int j;
1565 target_ulong addr;
1567 for(i = 0; i < L1_SIZE; i++) {
1568 p = l1_virt_map[i];
1569 if (p) {
1570 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1571 for(j = 0; j < L2_SIZE; j++) {
1572 if (p->valid_tag == virt_valid_tag &&
1573 p->phys_addr >= start && p->phys_addr < end &&
1574 (p->prot & PROT_WRITE)) {
1575 if (addr < MMAP_AREA_END) {
1576 mprotect((void *)addr, TARGET_PAGE_SIZE,
1577 p->prot & ~PROT_WRITE);
1580 addr += TARGET_PAGE_SIZE;
1581 p++;
1586 #endif
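/* illustrative consumer: code that scans guest RAM (display refresh, live
   migration) tests the per-page dirty bits for its range and then clears
   only its own flag, e.g.

       if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
           ... (page changed since the last scan)
       cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);

   (VGA_DIRTY_FLAG is assumed here as an example of a per-client flag defined
   next to CODE_DIRTY_FLAG).  The TLB rewrite above guarantees that the next
   guest write to the range goes through the NOTDIRTY slow path and sets the
   bits again. */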
1589 int cpu_physical_memory_set_dirty_tracking(int enable)
1591 int r=0;
1593 if (kvm_enabled())
1594 r = kvm_physical_memory_set_dirty_tracking(enable);
1595 in_migration = enable;
1596 return r;
1599 int cpu_physical_memory_get_dirty_tracking(void)
1601 return in_migration;
1604 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1606 ram_addr_t ram_addr;
1608 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1609 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1610 tlb_entry->addend - (unsigned long)phys_ram_base;
1611 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1612 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1617 /* update the TLB according to the current state of the dirty bits */
1618 void cpu_tlb_update_dirty(CPUState *env)
1620 int i;
1621 for(i = 0; i < CPU_TLB_SIZE; i++)
1622 tlb_update_dirty(&env->tlb_table[0][i]);
1623 for(i = 0; i < CPU_TLB_SIZE; i++)
1624 tlb_update_dirty(&env->tlb_table[1][i]);
1625 #if (NB_MMU_MODES >= 3)
1626 for(i = 0; i < CPU_TLB_SIZE; i++)
1627 tlb_update_dirty(&env->tlb_table[2][i]);
1628 #if (NB_MMU_MODES == 4)
1629 for(i = 0; i < CPU_TLB_SIZE; i++)
1630 tlb_update_dirty(&env->tlb_table[3][i]);
1631 #endif
1632 #endif
1635 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1636 unsigned long start)
1638 unsigned long addr;
1639 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1640 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1641 if (addr == start) {
1642 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1647 /* update the TLB corresponding to virtual page vaddr and phys addr
1648 addr so that it is no longer dirty */
1649 static inline void tlb_set_dirty(CPUState *env,
1650 unsigned long addr, target_ulong vaddr)
1652 int i;
1654 addr &= TARGET_PAGE_MASK;
1655 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1656 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1657 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1658 #if (NB_MMU_MODES >= 3)
1659 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1660 #if (NB_MMU_MODES == 4)
1661 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1662 #endif
1663 #endif
1666 /* add a new TLB entry. At most one entry for a given virtual address
1667 is permitted. Return 0 if OK or 2 if the page could not be mapped
1668 (can only happen in non SOFTMMU mode for I/O pages or pages
1669 conflicting with the host address space). */
1670 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1671 target_phys_addr_t paddr, int prot,
1672 int mmu_idx, int is_softmmu)
1674 PhysPageDesc *p;
1675 unsigned long pd;
1676 unsigned int index;
1677 target_ulong address;
1678 target_phys_addr_t addend;
1679 int ret;
1680 CPUTLBEntry *te;
1681 int i;
1683 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1684 if (!p) {
1685 pd = IO_MEM_UNASSIGNED;
1686 } else {
1687 pd = p->phys_offset;
1689 #if defined(DEBUG_TLB)
1690 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1691 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1692 #endif
1694 ret = 0;
1695 #if !defined(CONFIG_SOFTMMU)
1696 if (is_softmmu)
1697 #endif
1699 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1700 /* IO memory case */
1701 address = vaddr | pd;
1702 addend = paddr;
1703 } else {
1704 /* standard memory */
1705 address = vaddr;
1706 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1709 /* Make accesses to pages with watchpoints go via the
1710 watchpoint trap routines. */
1711 for (i = 0; i < env->nb_watchpoints; i++) {
1712 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1713 if (address & ~TARGET_PAGE_MASK) {
1714 env->watchpoint[i].addend = 0;
1715 address = vaddr | io_mem_watch;
1716 } else {
1717 env->watchpoint[i].addend = pd - paddr +
1718 (unsigned long) phys_ram_base;
1719 /* TODO: Figure out how to make read watchpoints coexist
1720 with code. */
1721 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1726 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1727 addend -= vaddr;
1728 te = &env->tlb_table[mmu_idx][index];
1729 te->addend = addend;
1730 if (prot & PAGE_READ) {
1731 te->addr_read = address;
1732 } else {
1733 te->addr_read = -1;
1735 if (prot & PAGE_EXEC) {
1736 te->addr_code = address;
1737 } else {
1738 te->addr_code = -1;
1740 if (prot & PAGE_WRITE) {
1741 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1742 (pd & IO_MEM_ROMD)) {
1743 /* write access calls the I/O callback */
1744 te->addr_write = vaddr |
1745 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1746 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1747 !cpu_physical_memory_is_dirty(pd)) {
1748 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1749 } else {
1750 te->addr_write = address;
1752 } else {
1753 te->addr_write = -1;
1756 #if !defined(CONFIG_SOFTMMU)
1757 else {
1758 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1759 /* IO access: no mapping is done as it will be handled by the
1760 soft MMU */
1761 if (!(env->hflags & HF_SOFTMMU_MASK))
1762 ret = 2;
1763 } else {
1764 void *map_addr;
1766 if (vaddr >= MMAP_AREA_END) {
1767 ret = 2;
1768 } else {
1769 if (prot & PROT_WRITE) {
1770 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1771 #if defined(TARGET_HAS_SMC) || 1
1772 first_tb ||
1773 #endif
1774 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1775 !cpu_physical_memory_is_dirty(pd))) {
1776 /* ROM: we behave as if code were inside */
1777 /* if code is present, we only map as read only and save the
1778 original mapping */
1779 VirtPageDesc *vp;
1781 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1782 vp->phys_addr = pd;
1783 vp->prot = prot;
1784 vp->valid_tag = virt_valid_tag;
1785 prot &= ~PAGE_WRITE;
1788 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1789 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1790 if (map_addr == MAP_FAILED) {
1791 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1792 paddr, vaddr);
1797 #endif
1798 return ret;
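/* illustrative fast path, a sketch of how the softmmu load helpers
   (softmmu_template.h) consume the entry filled in above:

       index    = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
       if ((vaddr & TARGET_PAGE_MASK) ==
           (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
           if (tlb_addr & ~TARGET_PAGE_MASK)
               ... (I/O: dispatch through io_mem_read[io_index])
           else
               val = ldub_p((uint8_t *)(long)
                            (vaddr + env->tlb_table[mmu_idx][index].addend));
       } else {
           ... (slow path: tlb_fill() ends up calling tlb_set_page_exec())
       }

   for RAM pages the addend turns a guest virtual address directly into a
   host pointer; for I/O pages the io_index kept in the low bits of the
   addr_read/addr_write/addr_code fields selects the handler instead. */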
1801 /* called from signal handler: invalidate the code and unprotect the
1802 page. Return TRUE if the fault was successfully handled. */
1803 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1805 #if !defined(CONFIG_SOFTMMU)
1806 VirtPageDesc *vp;
1808 #if defined(DEBUG_TLB)
1809 printf("page_unprotect: addr=0x%08x\n", addr);
1810 #endif
1811 addr &= TARGET_PAGE_MASK;
1813 /* if it is not mapped, no need to worry here */
1814 if (addr >= MMAP_AREA_END)
1815 return 0;
1816 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1817 if (!vp)
1818 return 0;
1819 /* NOTE: in this case, validate_tag is _not_ tested as it
1820 validates only the code TLB */
1821 if (vp->valid_tag != virt_valid_tag)
1822 return 0;
1823 if (!(vp->prot & PAGE_WRITE))
1824 return 0;
1825 #if defined(DEBUG_TLB)
1826 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1827 addr, vp->phys_addr, vp->prot);
1828 #endif
1829 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1830 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1831 (unsigned long)addr, vp->prot);
1832 /* set the dirty bit */
1833 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1834 /* flush the code inside */
1835 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1836 return 1;
1837 #else
1838 return 0;
1839 #endif
1842 #else
1844 void tlb_flush(CPUState *env, int flush_global)
1848 void tlb_flush_page(CPUState *env, target_ulong addr)
1852 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1853 target_phys_addr_t paddr, int prot,
1854 int mmu_idx, int is_softmmu)
1856 return 0;
1859 /* dump memory mappings */
1860 void page_dump(FILE *f)
1862 unsigned long start, end;
1863 int i, j, prot, prot1;
1864 PageDesc *p;
1866 fprintf(f, "%-8s %-8s %-8s %s\n",
1867 "start", "end", "size", "prot");
1868 start = -1;
1869 end = -1;
1870 prot = 0;
1871 for(i = 0; i <= L1_SIZE; i++) {
1872 if (i < L1_SIZE)
1873 p = l1_map[i];
1874 else
1875 p = NULL;
1876 for(j = 0;j < L2_SIZE; j++) {
1877 if (!p)
1878 prot1 = 0;
1879 else
1880 prot1 = p[j].flags;
1881 if (prot1 != prot) {
1882 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1883 if (start != -1) {
1884 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1885 start, end, end - start,
1886 prot & PAGE_READ ? 'r' : '-',
1887 prot & PAGE_WRITE ? 'w' : '-',
1888 prot & PAGE_EXEC ? 'x' : '-');
1890 if (prot1 != 0)
1891 start = end;
1892 else
1893 start = -1;
1894 prot = prot1;
1896 if (!p)
1897 break;
1902 int page_get_flags(target_ulong address)
1904 PageDesc *p;
1906 p = page_find(address >> TARGET_PAGE_BITS);
1907 if (!p)
1908 return 0;
1909 return p->flags;
1912 /* modify the flags of a page and invalidate the code if
1913 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1914 depending on PAGE_WRITE */
1915 void page_set_flags(target_ulong start, target_ulong end, int flags)
1917 PageDesc *p;
1918 target_ulong addr;
1920 start = start & TARGET_PAGE_MASK;
1921 end = TARGET_PAGE_ALIGN(end);
1922 if (flags & PAGE_WRITE)
1923 flags |= PAGE_WRITE_ORG;
1924 spin_lock(&tb_lock);
1925 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1926 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1927 /* if the write protection is set, then we invalidate the code
1928 inside */
1929 if (!(p->flags & PAGE_WRITE) &&
1930 (flags & PAGE_WRITE) &&
1931 p->first_tb) {
1932 tb_invalidate_phys_page(addr, 0, NULL);
1934 p->flags = flags;
1936 spin_unlock(&tb_lock);
1939 int page_check_range(target_ulong start, target_ulong len, int flags)
1941 PageDesc *p;
1942 target_ulong end;
1943 target_ulong addr;
1945 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1946 start = start & TARGET_PAGE_MASK;
1948 if( end < start )
1949 /* we've wrapped around */
1950 return -1;
1951 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1952 p = page_find(addr >> TARGET_PAGE_BITS);
1953 if( !p )
1954 return -1;
1955 if( !(p->flags & PAGE_VALID) )
1956 return -1;
1958 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1959 return -1;
1960 if (flags & PAGE_WRITE) {
1961 if (!(p->flags & PAGE_WRITE_ORG))
1962 return -1;
1963 /* unprotect the page if it was put read-only because it
1964 contains translated code */
1965 if (!(p->flags & PAGE_WRITE)) {
1966 if (!page_unprotect(addr, 0, NULL))
1967 return -1;
1969 return 0;
1972 return 0;
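/* illustrative use in user mode emulation: before touching a guest buffer a
   syscall helper validates it with

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -EFAULT;

   which, for writes, also transparently re-enables PAGE_WRITE on pages that
   were made read-only because they contain translated code. */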
1975 /* called from signal handler: invalidate the code and unprotect the
1976 page. Return TRUE if the fault was successfully handled. */
1977 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1979 unsigned int page_index, prot, pindex;
1980 PageDesc *p, *p1;
1981 target_ulong host_start, host_end, addr;
1983 host_start = address & qemu_host_page_mask;
1984 page_index = host_start >> TARGET_PAGE_BITS;
1985 p1 = page_find(page_index);
1986 if (!p1)
1987 return 0;
1988 host_end = host_start + qemu_host_page_size;
1989 p = p1;
1990 prot = 0;
1991 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1992 prot |= p->flags;
1993 p++;
1995 /* if the page was really writable, then we change its
1996 protection back to writable */
1997 if (prot & PAGE_WRITE_ORG) {
1998 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1999 if (!(p1[pindex].flags & PAGE_WRITE)) {
2000 mprotect((void *)g2h(host_start), qemu_host_page_size,
2001 (prot & PAGE_BITS) | PAGE_WRITE);
2002 p1[pindex].flags |= PAGE_WRITE;
2003 /* and since the content will be modified, we must invalidate
2004 the corresponding translated code. */
2005 tb_invalidate_phys_page(address, pc, puc);
2006 #ifdef DEBUG_TB_CHECK
2007 tb_invalidate_check(address);
2008 #endif
2009 return 1;
2012 return 0;
2015 static inline void tlb_set_dirty(CPUState *env,
2016 unsigned long addr, target_ulong vaddr)
2019 #endif /* defined(CONFIG_USER_ONLY) */
2021 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2022 int memory);
2023 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2024 int orig_memory);
2025 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2026 need_subpage) \
2027 do { \
2028 if (addr > start_addr) \
2029 start_addr2 = 0; \
2030 else { \
2031 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2032 if (start_addr2 > 0) \
2033 need_subpage = 1; \
2036 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2037 end_addr2 = TARGET_PAGE_SIZE - 1; \
2038 else { \
2039 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2040 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2041 need_subpage = 1; \
2043 } while (0)
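/* worked example (illustrative numbers): registering a 0x100 byte I/O region
   at physical address 0x30000040 with 4 KB target pages gives, for the only
   page touched,

       start_addr2 = 0x40   and   end_addr2 = 0x40 + 0x100 - 1 = 0x13f

   with need_subpage = 1, so cpu_register_physical_memory() below builds a
   subpage_t for that page and routes only offsets 0x40..0x13f to the new
   handler, leaving the rest of the page on its previous memory type. */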
2045 /* register physical memory. 'size' must be a multiple of the target
2046 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2047 io memory page */
2048 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2049 unsigned long size,
2050 unsigned long phys_offset)
2052 target_phys_addr_t addr, end_addr;
2053 PhysPageDesc *p;
2054 CPUState *env;
2055 unsigned long orig_size = size;
2056 void *subpage;
2058 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2059 end_addr = start_addr + (target_phys_addr_t)size;
2060 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2061 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2062 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2063 unsigned long orig_memory = p->phys_offset;
2064 target_phys_addr_t start_addr2, end_addr2;
2065 int need_subpage = 0;
2067 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2068 need_subpage);
2069 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2070 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2071 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2072 &p->phys_offset, orig_memory);
2073 } else {
2074 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2075 >> IO_MEM_SHIFT];
2077 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2078 } else {
2079 p->phys_offset = phys_offset;
2080 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2081 (phys_offset & IO_MEM_ROMD))
2082 phys_offset += TARGET_PAGE_SIZE;
2084 } else {
2085 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2086 p->phys_offset = phys_offset;
2087 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2088 (phys_offset & IO_MEM_ROMD))
2089 phys_offset += TARGET_PAGE_SIZE;
2090 else {
2091 target_phys_addr_t start_addr2, end_addr2;
2092 int need_subpage = 0;
2094 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2095 end_addr2, need_subpage);
2097 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2098 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2099 &p->phys_offset, IO_MEM_UNASSIGNED);
2100 subpage_register(subpage, start_addr2, end_addr2,
2101 phys_offset);
2107 /* since each CPU stores ram addresses in its TLB cache, we must
2108 reset the modified entries */
2109 /* XXX: slow! */
2110 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2111 tlb_flush(env, 1);
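/* Illustrative usage sketch, compiled out: board setup code typically pairs
   qemu_ram_alloc() (defined below) with cpu_register_physical_memory().
   The base address and size are example values. */
#if 0
static void example_map_ram(target_phys_addr_t base, unsigned long size)
{
    ram_addr_t ram_offset;

    /* reserve 'size' bytes inside phys_ram_base */
    ram_offset = qemu_ram_alloc(size);
    /* map them into the guest physical address space as ordinary RAM
       (low bits zero == IO_MEM_RAM) */
    cpu_register_physical_memory(base, size, ram_offset | IO_MEM_RAM);
}
#endif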
2115 /* XXX: temporary until new memory mapping API */
2116 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2118 PhysPageDesc *p;
2120 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2121 if (!p)
2122 return IO_MEM_UNASSIGNED;
2123 return p->phys_offset;
2126 /* XXX: better than nothing */
2127 ram_addr_t qemu_ram_alloc(unsigned long size)
2129 ram_addr_t addr;
2130 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2131 fprintf(stderr, "Not enough memory (requested_size = %llu, max memory = %llu)\n",
2132 (unsigned long long)size, (unsigned long long)phys_ram_size);
2133 abort();
2135 addr = phys_ram_alloc_offset;
2136 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2137 return addr;
2140 void qemu_ram_free(ram_addr_t addr)
2144 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2146 #ifdef DEBUG_UNASSIGNED
2147 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2148 #endif
2149 #ifdef TARGET_SPARC
2150 do_unassigned_access(addr, 0, 0, 0);
2151 #elif defined(TARGET_CRIS)
2152 do_unassigned_access(addr, 0, 0, 0);
2153 #endif
2154 return 0;
2157 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2159 #ifdef DEBUG_UNASSIGNED
2160 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2161 #endif
2162 #ifdef TARGET_SPARC
2163 do_unassigned_access(addr, 1, 0, 0);
2164 #elif defined(TARGET_CRIS)
2165 do_unassigned_access(addr, 1, 0, 0);
2166 #endif
2169 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2170 unassigned_mem_readb,
2171 unassigned_mem_readb,
2172 unassigned_mem_readb,
2175 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2176 unassigned_mem_writeb,
2177 unassigned_mem_writeb,
2178 unassigned_mem_writeb,
2181 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2183 unsigned long ram_addr;
2184 int dirty_flags;
2185 ram_addr = addr - (unsigned long)phys_ram_base;
2186 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2187 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2188 #if !defined(CONFIG_USER_ONLY)
2189 tb_invalidate_phys_page_fast(ram_addr, 1);
2190 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2191 #endif
2193 stb_p((uint8_t *)(long)addr, val);
2194 #ifdef USE_KQEMU
2195 if (cpu_single_env->kqemu_enabled &&
2196 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2197 kqemu_modify_page(cpu_single_env, ram_addr);
2198 #endif
2199 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2200 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2201 /* we remove the notdirty callback only if the code has been
2202 flushed */
2203 if (dirty_flags == 0xff)
2204 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2207 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2209 unsigned long ram_addr;
2210 int dirty_flags;
2211 ram_addr = addr - (unsigned long)phys_ram_base;
2212 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2213 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2214 #if !defined(CONFIG_USER_ONLY)
2215 tb_invalidate_phys_page_fast(ram_addr, 2);
2216 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2217 #endif
2219 stw_p((uint8_t *)(long)addr, val);
2220 #ifdef USE_KQEMU
2221 if (cpu_single_env->kqemu_enabled &&
2222 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2223 kqemu_modify_page(cpu_single_env, ram_addr);
2224 #endif
2225 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2226 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2227 /* we remove the notdirty callback only if the code has been
2228 flushed */
2229 if (dirty_flags == 0xff)
2230 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2233 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2235 unsigned long ram_addr;
2236 int dirty_flags;
2237 ram_addr = addr - (unsigned long)phys_ram_base;
2238 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2239 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2240 #if !defined(CONFIG_USER_ONLY)
2241 tb_invalidate_phys_page_fast(ram_addr, 4);
2242 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2243 #endif
2245 stl_p((uint8_t *)(long)addr, val);
2246 #ifdef USE_KQEMU
2247 if (cpu_single_env->kqemu_enabled &&
2248 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2249 kqemu_modify_page(cpu_single_env, ram_addr);
2250 #endif
2251 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2252 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2253 /* we remove the notdirty callback only if the code has been
2254 flushed */
2255 if (dirty_flags == 0xff)
2256 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2259 static CPUReadMemoryFunc *error_mem_read[3] = {
2260 NULL, /* never used */
2261 NULL, /* never used */
2262 NULL, /* never used */
2265 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2266 notdirty_mem_writeb,
2267 notdirty_mem_writew,
2268 notdirty_mem_writel,
2271 #if defined(CONFIG_SOFTMMU)
2272 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2273 so these check for a hit then pass through to the normal out-of-line
2274 phys routines. */
2275 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2277 return ldub_phys(addr);
2280 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2282 return lduw_phys(addr);
2285 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2287 return ldl_phys(addr);
2290 /* Generate a debug exception if a watchpoint has been hit.
2291 Returns the real physical address of the access. addr will be a host
2292 address in case of a RAM location. */
2293 static target_ulong check_watchpoint(target_phys_addr_t addr)
2295 CPUState *env = cpu_single_env;
2296 target_ulong watch;
2297 target_ulong retaddr;
2298 int i;
2300 retaddr = addr;
2301 for (i = 0; i < env->nb_watchpoints; i++) {
2302 watch = env->watchpoint[i].vaddr;
2303 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2304 retaddr = addr - env->watchpoint[i].addend;
2305 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2306 cpu_single_env->watchpoint_hit = i + 1;
2307 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2308 break;
2312 return retaddr;
2315 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2316 uint32_t val)
2318 addr = check_watchpoint(addr);
2319 stb_phys(addr, val);
2322 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2323 uint32_t val)
2325 addr = check_watchpoint(addr);
2326 stw_phys(addr, val);
2329 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2330 uint32_t val)
2332 addr = check_watchpoint(addr);
2333 stl_phys(addr, val);
2336 static CPUReadMemoryFunc *watch_mem_read[3] = {
2337 watch_mem_readb,
2338 watch_mem_readw,
2339 watch_mem_readl,
2342 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2343 watch_mem_writeb,
2344 watch_mem_writew,
2345 watch_mem_writel,
2347 #endif
2349 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2350 unsigned int len)
2352 uint32_t ret;
2353 unsigned int idx;
2355 idx = SUBPAGE_IDX(addr - mmio->base);
2356 #if defined(DEBUG_SUBPAGE)
2357 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2358 mmio, len, addr, idx);
2359 #endif
2360 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2362 return ret;
2365 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2366 uint32_t value, unsigned int len)
2368 unsigned int idx;
2370 idx = SUBPAGE_IDX(addr - mmio->base);
2371 #if defined(DEBUG_SUBPAGE)
2372 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2373 mmio, len, addr, idx, value);
2374 #endif
2375 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2378 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2380 #if defined(DEBUG_SUBPAGE)
2381 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2382 #endif
2384 return subpage_readlen(opaque, addr, 0);
2387 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2388 uint32_t value)
2390 #if defined(DEBUG_SUBPAGE)
2391 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2392 #endif
2393 subpage_writelen(opaque, addr, value, 0);
2396 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2398 #if defined(DEBUG_SUBPAGE)
2399 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2400 #endif
2402 return subpage_readlen(opaque, addr, 1);
2405 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2406 uint32_t value)
2408 #if defined(DEBUG_SUBPAGE)
2409 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2410 #endif
2411 subpage_writelen(opaque, addr, value, 1);
2414 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2416 #if defined(DEBUG_SUBPAGE)
2417 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2418 #endif
2420 return subpage_readlen(opaque, addr, 2);
2423 static void subpage_writel (void *opaque,
2424 target_phys_addr_t addr, uint32_t value)
2426 #if defined(DEBUG_SUBPAGE)
2427 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2428 #endif
2429 subpage_writelen(opaque, addr, value, 2);
2432 static CPUReadMemoryFunc *subpage_read[] = {
2433 &subpage_readb,
2434 &subpage_readw,
2435 &subpage_readl,
2438 static CPUWriteMemoryFunc *subpage_write[] = {
2439 &subpage_writeb,
2440 &subpage_writew,
2441 &subpage_writel,
2444 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2445 int memory)
2447 int idx, eidx;
2448 unsigned int i;
2450 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2451 return -1;
2452 idx = SUBPAGE_IDX(start);
2453 eidx = SUBPAGE_IDX(end);
2454 #if defined(DEBUG_SUBPAGE)
2455 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2456 mmio, start, end, idx, eidx, memory);
2457 #endif
2458 memory >>= IO_MEM_SHIFT;
2459 for (; idx <= eidx; idx++) {
2460 for (i = 0; i < 4; i++) {
2461 if (io_mem_read[memory][i]) {
2462 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2463 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2465 if (io_mem_write[memory][i]) {
2466 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2467 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2472 return 0;
2475 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2476 int orig_memory)
2478 subpage_t *mmio;
2479 int subpage_memory;
2481 mmio = qemu_mallocz(sizeof(subpage_t));
2482 if (mmio != NULL) {
2483 mmio->base = base;
2484 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2485 #if defined(DEBUG_SUBPAGE)
2486 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2487 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2488 #endif
2489 *phys = subpage_memory | IO_MEM_SUBPAGE;
2490 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2493 return mmio;
2496 static void io_mem_init(void)
2498 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2499 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2500 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2501 io_mem_nb = 5;
2503 #if defined(CONFIG_SOFTMMU)
2504 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2505 watch_mem_write, NULL);
2506 #endif
2507 /* alloc dirty bits array */
2508 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2509 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2512 /* mem_read and mem_write are arrays of function pointers for byte
2513 (index 0), word (index 1) and dword (index 2) accesses. Individual
2514 functions can be omitted by passing a NULL pointer; the registered
2515 functions may also be replaced dynamically later.
2516 If io_index is non-zero, the corresponding io zone is
2517 modified. If it is zero, a new io zone is allocated. The return
2518 value can be used with cpu_register_physical_memory(); -1 is
2519 returned on error. */
2520 int cpu_register_io_memory(int io_index,
2521 CPUReadMemoryFunc **mem_read,
2522 CPUWriteMemoryFunc **mem_write,
2523 void *opaque)
2525 int i, subwidth = 0;
2527 if (io_index <= 0) {
2528 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2529 return -1;
2530 io_index = io_mem_nb++;
2531 } else {
2532 if (io_index >= IO_MEM_NB_ENTRIES)
2533 return -1;
2536 for(i = 0;i < 3; i++) {
2537 if (!mem_read[i] || !mem_write[i])
2538 subwidth = IO_MEM_SUBWIDTH;
2539 io_mem_read[io_index][i] = mem_read[i];
2540 io_mem_write[io_index][i] = mem_write[i];
2542 io_mem_opaque[io_index] = opaque;
2543 return (io_index << IO_MEM_SHIFT) | subwidth;
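/* Illustrative usage sketch, compiled out: registering a one-page MMIO
   region with cpu_register_io_memory() and then mapping it with
   cpu_register_physical_memory().  The device callbacks, their behaviour
   and the base address are invented for the example. */
#if 0
static uint32_t example_mmio_read(void *opaque, target_phys_addr_t addr)
{
    /* any read, of any width, returns a fixed ID value in this sketch */
    return 0x12345678;
}

static void example_mmio_write(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* writes are simply ignored here */
}

static CPUReadMemoryFunc *example_mmio_readfn[3] = {
    example_mmio_read,  /* byte  */
    example_mmio_read,  /* word  */
    example_mmio_read,  /* dword */
};

static CPUWriteMemoryFunc *example_mmio_writefn[3] = {
    example_mmio_write,
    example_mmio_write,
    example_mmio_write,
};

static void example_mmio_map(target_phys_addr_t base)
{
    int io;

    /* io_index 0 asks for a fresh zone; -1 means the table is full */
    io = cpu_register_io_memory(0, example_mmio_readfn, example_mmio_writefn,
                                NULL);
    if (io < 0)
        return;
    /* the return value is already encoded with IO_MEM_SHIFT, so it can be
       passed directly as the phys_offset of a one-page MMIO mapping */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif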
2546 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2548 return io_mem_write[io_index >> IO_MEM_SHIFT];
2551 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2553 return io_mem_read[io_index >> IO_MEM_SHIFT];
2556 /* physical memory access (slow version, mainly for debug) */
2557 #if defined(CONFIG_USER_ONLY)
2558 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2559 int len, int is_write)
2561 int l, flags;
2562 target_ulong page;
2563 void * p;
2565 while (len > 0) {
2566 page = addr & TARGET_PAGE_MASK;
2567 l = (page + TARGET_PAGE_SIZE) - addr;
2568 if (l > len)
2569 l = len;
2570 flags = page_get_flags(page);
2571 if (!(flags & PAGE_VALID))
2572 return;
2573 if (is_write) {
2574 if (!(flags & PAGE_WRITE))
2575 return;
2576 /* XXX: this code should not depend on lock_user */
2577 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2578 /* FIXME - should this return an error rather than just fail? */
2579 return;
2580 memcpy(p, buf, len);
2581 unlock_user(p, addr, len);
2582 } else {
2583 if (!(flags & PAGE_READ))
2584 return;
2585 /* XXX: this code should not depend on lock_user */
2586 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2587 /* FIXME - should this return an error rather than just fail? */
2588 return;
2589 memcpy(buf, p, len);
2590 unlock_user(p, addr, 0);
2592 len -= l;
2593 buf += l;
2594 addr += l;
2598 #else
2599 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2600 int len, int is_write)
2602 int l, io_index;
2603 uint8_t *ptr;
2604 uint32_t val;
2605 target_phys_addr_t page;
2606 unsigned long pd;
2607 PhysPageDesc *p;
2609 while (len > 0) {
2610 page = addr & TARGET_PAGE_MASK;
2611 l = (page + TARGET_PAGE_SIZE) - addr;
2612 if (l > len)
2613 l = len;
2614 p = phys_page_find(page >> TARGET_PAGE_BITS);
2615 if (!p) {
2616 pd = IO_MEM_UNASSIGNED;
2617 } else {
2618 pd = p->phys_offset;
2621 if (is_write) {
2622 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2623 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2624 /* XXX: could force cpu_single_env to NULL to avoid
2625 potential bugs */
2626 if (l >= 4 && ((addr & 3) == 0)) {
2627 /* 32 bit write access */
2628 val = ldl_p(buf);
2629 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2630 l = 4;
2631 } else if (l >= 2 && ((addr & 1) == 0)) {
2632 /* 16 bit write access */
2633 val = lduw_p(buf);
2634 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2635 l = 2;
2636 } else {
2637 /* 8 bit write access */
2638 val = ldub_p(buf);
2639 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2640 l = 1;
2642 } else {
2643 unsigned long addr1;
2644 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2645 /* RAM case */
2646 ptr = phys_ram_base + addr1;
2647 memcpy(ptr, buf, l);
2648 if (!cpu_physical_memory_is_dirty(addr1)) {
2649 /* invalidate code */
2650 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2651 /* set dirty bit */
2652 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2653 (0xff & ~CODE_DIRTY_FLAG);
2655 /* qemu doesn't execute guest code directly, but kvm does;
2656 therefore flush the instruction caches */
2657 if (kvm_enabled())
2658 flush_icache_range((unsigned long)ptr,
2659 ((unsigned long)ptr)+l);
2661 } else {
2662 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2663 !(pd & IO_MEM_ROMD)) {
2664 /* I/O case */
2665 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2666 if (l >= 4 && ((addr & 3) == 0)) {
2667 /* 32 bit read access */
2668 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2669 stl_p(buf, val);
2670 l = 4;
2671 } else if (l >= 2 && ((addr & 1) == 0)) {
2672 /* 16 bit read access */
2673 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2674 stw_p(buf, val);
2675 l = 2;
2676 } else {
2677 /* 8 bit read access */
2678 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2679 stb_p(buf, val);
2680 l = 1;
2682 } else {
2683 /* RAM case */
2684 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2685 (addr & ~TARGET_PAGE_MASK);
2686 memcpy(buf, ptr, l);
2689 len -= l;
2690 buf += l;
2691 addr += l;
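/* Illustrative usage sketch, compiled out: device and monitor code usually
   goes through the cpu_physical_memory_read()/write() wrappers around
   cpu_physical_memory_rw().  The address and buffer are example values. */
#if 0
static void example_peek_guest_ram(target_phys_addr_t guest_paddr)
{
    uint8_t buf[16];

    /* copy 16 bytes out of guest physical memory (RAM or MMIO) */
    cpu_physical_memory_read(guest_paddr, buf, sizeof(buf));
    /* patch the first byte and write it back; RAM writes also update the
       dirty bitmap and invalidate any translated code on that page */
    buf[0] ^= 0xff;
    cpu_physical_memory_write(guest_paddr, buf, sizeof(buf));
}
#endif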
2695 /* used for ROM loading: can write in RAM and ROM */
2696 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2697 const uint8_t *buf, int len)
2699 int l;
2700 uint8_t *ptr;
2701 target_phys_addr_t page;
2702 unsigned long pd;
2703 PhysPageDesc *p;
2705 while (len > 0) {
2706 page = addr & TARGET_PAGE_MASK;
2707 l = (page + TARGET_PAGE_SIZE) - addr;
2708 if (l > len)
2709 l = len;
2710 p = phys_page_find(page >> TARGET_PAGE_BITS);
2711 if (!p) {
2712 pd = IO_MEM_UNASSIGNED;
2713 } else {
2714 pd = p->phys_offset;
2717 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2718 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2719 !(pd & IO_MEM_ROMD)) {
2720 /* do nothing */
2721 } else {
2722 unsigned long addr1;
2723 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2724 /* ROM/RAM case */
2725 ptr = phys_ram_base + addr1;
2726 memcpy(ptr, buf, l);
2728 len -= l;
2729 buf += l;
2730 addr += l;
2735 /* warning: addr must be aligned */
2736 uint32_t ldl_phys(target_phys_addr_t addr)
2738 int io_index;
2739 uint8_t *ptr;
2740 uint32_t val;
2741 unsigned long pd;
2742 PhysPageDesc *p;
2744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2745 if (!p) {
2746 pd = IO_MEM_UNASSIGNED;
2747 } else {
2748 pd = p->phys_offset;
2751 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2752 !(pd & IO_MEM_ROMD)) {
2753 /* I/O case */
2754 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2755 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2756 } else {
2757 /* RAM case */
2758 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2759 (addr & ~TARGET_PAGE_MASK);
2760 val = ldl_p(ptr);
2762 return val;
2765 /* warning: addr must be aligned */
2766 uint64_t ldq_phys(target_phys_addr_t addr)
2768 int io_index;
2769 uint8_t *ptr;
2770 uint64_t val;
2771 unsigned long pd;
2772 PhysPageDesc *p;
2774 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2775 if (!p) {
2776 pd = IO_MEM_UNASSIGNED;
2777 } else {
2778 pd = p->phys_offset;
2781 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2782 !(pd & IO_MEM_ROMD)) {
2783 /* I/O case */
2784 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2785 #ifdef TARGET_WORDS_BIGENDIAN
2786 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2787 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2788 #else
2789 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2790 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2791 #endif
2792 } else {
2793 /* RAM case */
2794 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2795 (addr & ~TARGET_PAGE_MASK);
2796 val = ldq_p(ptr);
2798 return val;
2801 /* XXX: optimize */
2802 uint32_t ldub_phys(target_phys_addr_t addr)
2804 uint8_t val;
2805 cpu_physical_memory_read(addr, &val, 1);
2806 return val;
2809 /* XXX: optimize */
2810 uint32_t lduw_phys(target_phys_addr_t addr)
2812 uint16_t val;
2813 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2814 return tswap16(val);
2817 #ifdef __GNUC__
2818 #define likely(x) __builtin_expect(!!(x), 1)
2819 #define unlikely(x) __builtin_expect(!!(x), 0)
2820 #else
2821 #define likely(x) x
2822 #define unlikely(x) x
2823 #endif
2825 /* warning: addr must be aligned. The ram page is not marked as dirty
2826 and the code inside is not invalidated. It is useful if the dirty
2827 bits are used to track modified PTEs */
2828 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2830 int io_index;
2831 uint8_t *ptr;
2832 unsigned long pd;
2833 PhysPageDesc *p;
2835 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2836 if (!p) {
2837 pd = IO_MEM_UNASSIGNED;
2838 } else {
2839 pd = p->phys_offset;
2842 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2843 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2844 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2845 } else {
2846 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2847 ptr = phys_ram_base + addr1;
2848 stl_p(ptr, val);
2850 if (unlikely(in_migration)) {
2851 if (!cpu_physical_memory_is_dirty(addr1)) {
2852 /* invalidate code */
2853 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2854 /* set dirty bit */
2855 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2856 (0xff & ~CODE_DIRTY_FLAG);
2862 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2864 int io_index;
2865 uint8_t *ptr;
2866 unsigned long pd;
2867 PhysPageDesc *p;
2869 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2870 if (!p) {
2871 pd = IO_MEM_UNASSIGNED;
2872 } else {
2873 pd = p->phys_offset;
2876 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2877 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2878 #ifdef TARGET_WORDS_BIGENDIAN
2879 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2880 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2881 #else
2882 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2883 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2884 #endif
2885 } else {
2886 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2887 (addr & ~TARGET_PAGE_MASK);
2888 stq_p(ptr, val);
2892 /* warning: addr must be aligned */
2893 void stl_phys(target_phys_addr_t addr, uint32_t val)
2895 int io_index;
2896 uint8_t *ptr;
2897 unsigned long pd;
2898 PhysPageDesc *p;
2900 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2901 if (!p) {
2902 pd = IO_MEM_UNASSIGNED;
2903 } else {
2904 pd = p->phys_offset;
2907 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2908 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2909 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2910 } else {
2911 unsigned long addr1;
2912 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2913 /* RAM case */
2914 ptr = phys_ram_base + addr1;
2915 stl_p(ptr, val);
2916 if (!cpu_physical_memory_is_dirty(addr1)) {
2917 /* invalidate code */
2918 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2919 /* set dirty bit */
2920 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2921 (0xff & ~CODE_DIRTY_FLAG);
2926 /* XXX: optimize */
2927 void stb_phys(target_phys_addr_t addr, uint32_t val)
2929 uint8_t v = val;
2930 cpu_physical_memory_write(addr, &v, 1);
2933 /* XXX: optimize */
2934 void stw_phys(target_phys_addr_t addr, uint32_t val)
2936 uint16_t v = tswap16(val);
2937 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2940 /* XXX: optimize */
2941 void stq_phys(target_phys_addr_t addr, uint64_t val)
2943 val = tswap64(val);
2944 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
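/* Illustrative sketch, compiled out: the ld*_phys/st*_phys helpers defined
   above give word-sized, target-endian access to guest physical memory.
   The address is an example value. */
#if 0
static void example_phys_word_roundtrip(target_phys_addr_t guest_paddr)
{
    /* store a 32-bit value, then read it back; for RAM-backed addresses
       stl_phys() also handles dirty tracking and code invalidation */
    stl_phys(guest_paddr, 0xdeadbeef);
    if (ldl_phys(guest_paddr) != 0xdeadbeef) {
        /* only reachable if the address is MMIO with other semantics */
    }
}
#endif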
2947 #endif
2949 /* virtual memory access for debug */
2950 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2951 uint8_t *buf, int len, int is_write)
2953 int l;
2954 target_phys_addr_t phys_addr;
2955 target_ulong page;
2957 while (len > 0) {
2958 page = addr & TARGET_PAGE_MASK;
2959 phys_addr = cpu_get_phys_page_debug(env, page);
2960 /* if no physical page mapped, return an error */
2961 if (phys_addr == -1)
2962 return -1;
2963 l = (page + TARGET_PAGE_SIZE) - addr;
2964 if (l > len)
2965 l = len;
2966 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2967 buf, l, is_write);
2968 len -= l;
2969 buf += l;
2970 addr += l;
2972 return 0;
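/* Illustrative usage sketch, compiled out: a gdb-stub style read of guest
   virtual memory through cpu_memory_rw_debug(), which performs the
   virtual-to-physical translation page by page.  The helper name is an
   assumption made for the example. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* is_write == 0: copy from guest memory into buf; returns -1 if any
       page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif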
2975 void dump_exec_info(FILE *f,
2976 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2978 int i, target_code_size, max_target_code_size;
2979 int direct_jmp_count, direct_jmp2_count, cross_page;
2980 TranslationBlock *tb;
2982 target_code_size = 0;
2983 max_target_code_size = 0;
2984 cross_page = 0;
2985 direct_jmp_count = 0;
2986 direct_jmp2_count = 0;
2987 for(i = 0; i < nb_tbs; i++) {
2988 tb = &tbs[i];
2989 target_code_size += tb->size;
2990 if (tb->size > max_target_code_size)
2991 max_target_code_size = tb->size;
2992 if (tb->page_addr[1] != -1)
2993 cross_page++;
2994 if (tb->tb_next_offset[0] != 0xffff) {
2995 direct_jmp_count++;
2996 if (tb->tb_next_offset[1] != 0xffff) {
2997 direct_jmp2_count++;
3001 /* XXX: avoid using doubles? */
3002 cpu_fprintf(f, "Translation buffer state:\n");
3003 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3004 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3005 nb_tbs ? target_code_size / nb_tbs : 0,
3006 max_target_code_size);
3007 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3008 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3009 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3010 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3011 cross_page,
3012 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3013 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3014 direct_jmp_count,
3015 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3016 direct_jmp2_count,
3017 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3018 cpu_fprintf(f, "\nStatistics:\n");
3019 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3020 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3021 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3022 #ifdef CONFIG_PROFILER
3024 int64_t tot;
3025 tot = dyngen_interm_time + dyngen_code_time;
3026 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3027 tot, tot / 2.4e9);
3028 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3029 dyngen_tb_count,
3030 dyngen_tb_count1 - dyngen_tb_count,
3031 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3032 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3033 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3034 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
3035 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3036 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3037 dyngen_tb_count ?
3038 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3039 cpu_fprintf(f, "cycles/op %0.1f\n",
3040 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3041 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3042 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3043 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3044 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3045 if (tot == 0)
3046 tot = 1;
3047 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3048 (double)dyngen_interm_time / tot * 100.0);
3049 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3050 (double)dyngen_code_time / tot * 100.0);
3051 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3052 dyngen_restore_count);
3053 cpu_fprintf(f, " avg cycles %0.1f\n",
3054 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3056 extern void dump_op_count(void);
3057 dump_op_count();
3060 #endif
3063 #if !defined(CONFIG_USER_ONLY)
3065 #define MMUSUFFIX _cmmu
3066 #define GETPC() NULL
3067 #define env cpu_single_env
3068 #define SOFTMMU_CODE_ACCESS
3070 #define SHIFT 0
3071 #include "softmmu_template.h"
3073 #define SHIFT 1
3074 #include "softmmu_template.h"
3076 #define SHIFT 2
3077 #include "softmmu_template.h"
3079 #define SHIFT 3
3080 #include "softmmu_template.h"
3082 #undef env
3084 #endif