[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "dyngen.h"
39 #include "qemu-kvm.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
61 /* threshold to flush the translated code buffer */
62 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif USE_KQEMU
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
81 #elif TARGET_X86_64
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
85 #else
86 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 #endif
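/* TARGET_PHYS_ADDR_SPACE_BITS bounds the physical address space that the
   phys_page tables below must cover; when it exceeds 32 bits,
   phys_page_find_alloc() chains an extra indirection level in front of the
   usual two-level table. */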
89 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
90 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
91 int nb_tbs;
92 /* any access to the tbs or the page table must use this lock */
93 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
95 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
96 uint8_t *code_gen_ptr;
98 ram_addr_t phys_ram_size;
99 int phys_ram_fd;
100 uint8_t *phys_ram_base;
101 uint8_t *phys_ram_dirty;
102 uint8_t *bios_mem;
103 static int in_migration;
104 static ram_addr_t phys_ram_alloc_offset = 0;
106 CPUState *first_cpu;
107 /* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
109 CPUState *cpu_single_env;
111 typedef struct PageDesc {
112 /* list of TBs intersecting this ram page */
113 TranslationBlock *first_tb;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118 #if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120 #endif
121 } PageDesc;
123 typedef struct PhysPageDesc {
124 /* offset in host memory of the page + io_index in the low 12 bits */
125 ram_addr_t phys_offset;
126 } PhysPageDesc;
128 #define L2_BITS 10
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64-bit address space.
133 */
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135 #else
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
137 #endif
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
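/* The page tables are two-level arrays indexed by the linear page number
   (address >> TARGET_PAGE_BITS): the upper L1_BITS select an entry of
   l1_map[], the lower L2_BITS select a PageDesc inside the L2 array it
   points to, i.e. roughly

       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]

   With 4 KiB target pages (TARGET_PAGE_BITS == 12) and a 32 bit virtual
   space, L1_BITS = 32 - 10 - 12 = 10, so both levels hold 1024 entries;
   L2 arrays are only allocated on demand (see page_find_alloc()). */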
142 static void io_mem_init(void);
144 unsigned long qemu_real_host_page_size;
145 unsigned long qemu_host_page_bits;
146 unsigned long qemu_host_page_size;
147 unsigned long qemu_host_page_mask;
149 /* XXX: for system emulation, it could just be an array */
150 static PageDesc *l1_map[L1_SIZE];
151 PhysPageDesc **l1_phys_map;
153 /* io memory support */
154 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
155 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
156 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
157 static int io_mem_nb;
158 #if defined(CONFIG_SOFTMMU)
159 static int io_mem_watch;
160 #endif
162 /* log support */
163 char *logfilename = "/tmp/qemu.log";
164 FILE *logfile;
165 int loglevel;
166 static int log_append = 0;
168 /* statistics */
169 static int tlb_flush_count;
170 static int tb_flush_count;
171 static int tb_phys_invalidate_count;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179 } subpage_t;
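/* A subpage splits one target page between several I/O regions: base is
   the physical address of the page, and the per-offset tables roughly give,
   for each byte offset within the page (SUBPAGE_IDX) and access size, the
   read/write handlers and opaque pointer to use.  subpage_register(),
   declared further down, fills these tables for a sub-range of the page. */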
181 static void page_init(void)
183 /* NOTE: we can always suppose that qemu_host_page_size >=
184 TARGET_PAGE_SIZE */
185 #ifdef _WIN32
187 SYSTEM_INFO system_info;
188 DWORD old_protect;
190 GetSystemInfo(&system_info);
191 qemu_real_host_page_size = system_info.dwPageSize;
193 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
194 PAGE_EXECUTE_READWRITE, &old_protect);
196 #else
197 qemu_real_host_page_size = getpagesize();
199 unsigned long start, end;
201 start = (unsigned long)code_gen_buffer;
202 start &= ~(qemu_real_host_page_size - 1);
204 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
205 end += qemu_real_host_page_size - 1;
206 end &= ~(qemu_real_host_page_size - 1);
208 mprotect((void *)start, end - start,
209 PROT_READ | PROT_WRITE | PROT_EXEC);
211 #endif
213 if (qemu_host_page_size == 0)
214 qemu_host_page_size = qemu_real_host_page_size;
215 if (qemu_host_page_size < TARGET_PAGE_SIZE)
216 qemu_host_page_size = TARGET_PAGE_SIZE;
217 qemu_host_page_bits = 0;
218 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
219 qemu_host_page_bits++;
220 qemu_host_page_mask = ~(qemu_host_page_size - 1);
221 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
222 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
224 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
226 long long startaddr, endaddr;
227 FILE *f;
228 int n;
230 f = fopen("/proc/self/maps", "r");
231 if (f) {
232 do {
233 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
234 if (n == 2) {
235 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
236 TARGET_PAGE_ALIGN(endaddr),
237 PAGE_RESERVED);
239 } while (!feof(f));
240 fclose(f);
243 #endif
246 static inline PageDesc *page_find_alloc(unsigned int index)
248 PageDesc **lp, *p;
250 lp = &l1_map[index >> L2_BITS];
251 p = *lp;
252 if (!p) {
253 /* allocate if not found */
254 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
255 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
256 *lp = p;
258 return p + (index & (L2_SIZE - 1));
261 static inline PageDesc *page_find(unsigned int index)
263 PageDesc *p;
265 p = l1_map[index >> L2_BITS];
266 if (!p)
267 return 0;
268 return p + (index & (L2_SIZE - 1));
271 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
273 void **lp, **p;
274 PhysPageDesc *pd;
276 p = (void **)l1_phys_map;
277 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
279 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
280 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
281 #endif
282 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
283 p = *lp;
284 if (!p) {
285 /* allocate if not found */
286 if (!alloc)
287 return NULL;
288 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
289 memset(p, 0, sizeof(void *) * L1_SIZE);
290 *lp = p;
292 #endif
293 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
294 pd = *lp;
295 if (!pd) {
296 int i;
297 /* allocate if not found */
298 if (!alloc)
299 return NULL;
300 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
301 *lp = pd;
302 for (i = 0; i < L2_SIZE; i++)
303 pd[i].phys_offset = IO_MEM_UNASSIGNED;
305 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
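/* Note: the physical page table mirrors the two-level layout above, but its
   leaves are allocated with every entry preset to IO_MEM_UNASSIGNED, and for
   physical address spaces wider than 32 bits an additional level is chained
   in front (see the #if above). */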
308 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
310 return phys_page_find_alloc(index, 0);
313 #if !defined(CONFIG_USER_ONLY)
314 static void tlb_protect_code(ram_addr_t ram_addr);
315 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
316 target_ulong vaddr);
317 #endif
319 void cpu_exec_init(CPUState *env)
321 CPUState **penv;
322 int cpu_index;
324 if (!code_gen_ptr) {
325 code_gen_ptr = code_gen_buffer;
326 page_init();
327 io_mem_init();
329 env->next_cpu = NULL;
330 penv = &first_cpu;
331 cpu_index = 0;
332 while (*penv != NULL) {
333 penv = (CPUState **)&(*penv)->next_cpu;
334 cpu_index++;
336 env->cpu_index = cpu_index;
337 env->nb_watchpoints = 0;
338 *penv = env;
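/* CPUs form a singly linked list rooted at first_cpu; cpu_exec_init()
   appends the new CPUState at the tail and gives it the next free
   cpu_index.  The very first call also sets up the code generation buffer,
   the page tables and the I/O memory handlers. */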
341 static inline void invalidate_page_bitmap(PageDesc *p)
343 if (p->code_bitmap) {
344 qemu_free(p->code_bitmap);
345 p->code_bitmap = NULL;
347 p->code_write_count = 0;
350 /* set to NULL all the 'first_tb' fields in all PageDescs */
351 static void page_flush_tb(void)
353 int i, j;
354 PageDesc *p;
356 for(i = 0; i < L1_SIZE; i++) {
357 p = l1_map[i];
358 if (p) {
359 for(j = 0; j < L2_SIZE; j++) {
360 p->first_tb = NULL;
361 invalidate_page_bitmap(p);
362 p++;
368 /* flush all the translation blocks */
369 /* XXX: tb_flush is currently not thread safe */
370 void tb_flush(CPUState *env1)
372 CPUState *env;
373 #if defined(DEBUG_FLUSH)
374 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
375 (unsigned long)(code_gen_ptr - code_gen_buffer),
376 nb_tbs, nb_tbs > 0 ?
377 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
378 #endif
379 nb_tbs = 0;
381 for(env = first_cpu; env != NULL; env = env->next_cpu) {
382 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
385 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
386 page_flush_tb();
388 code_gen_ptr = code_gen_buffer;
389 /* XXX: flush processor icache at this point if cache flush is
390 expensive */
391 tb_flush_count++;
394 #ifdef DEBUG_TB_CHECK
396 static void tb_invalidate_check(target_ulong address)
398 TranslationBlock *tb;
399 int i;
400 address &= TARGET_PAGE_MASK;
401 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
402 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
403 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
404 address >= tb->pc + tb->size)) {
405 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
406 address, (long)tb->pc, tb->size);
412 /* verify that all the pages have correct rights for code */
413 static void tb_page_check(void)
415 TranslationBlock *tb;
416 int i, flags1, flags2;
418 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
419 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
420 flags1 = page_get_flags(tb->pc);
421 flags2 = page_get_flags(tb->pc + tb->size - 1);
422 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
423 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
424 (long)tb->pc, tb->size, flags1, flags2);
430 void tb_jmp_check(TranslationBlock *tb)
432 TranslationBlock *tb1;
433 unsigned int n1;
435 /* suppress any remaining jumps to this TB */
436 tb1 = tb->jmp_first;
437 for(;;) {
438 n1 = (long)tb1 & 3;
439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
440 if (n1 == 2)
441 break;
442 tb1 = tb1->jmp_next[n1];
444 /* check end of list */
445 if (tb1 != tb) {
446 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
450 #endif
452 /* invalidate one TB */
453 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
454 int next_offset)
456 TranslationBlock *tb1;
457 for(;;) {
458 tb1 = *ptb;
459 if (tb1 == tb) {
460 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
461 break;
463 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
467 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
469 TranslationBlock *tb1;
470 unsigned int n1;
472 for(;;) {
473 tb1 = *ptb;
474 n1 = (long)tb1 & 3;
475 tb1 = (TranslationBlock *)((long)tb1 & ~3);
476 if (tb1 == tb) {
477 *ptb = tb1->page_next[n1];
478 break;
480 ptb = &tb1->page_next[n1];
484 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
486 TranslationBlock *tb1, **ptb;
487 unsigned int n1;
489 ptb = &tb->jmp_next[n];
490 tb1 = *ptb;
491 if (tb1) {
492 /* find tb(n) in circular list */
493 for(;;) {
494 tb1 = *ptb;
495 n1 = (long)tb1 & 3;
496 tb1 = (TranslationBlock *)((long)tb1 & ~3);
497 if (n1 == n && tb1 == tb)
498 break;
499 if (n1 == 2) {
500 ptb = &tb1->jmp_first;
501 } else {
502 ptb = &tb1->jmp_next[n1];
505 /* now we can suppress tb(n) from the list */
506 *ptb = tb->jmp_next[n];
508 tb->jmp_next[n] = NULL;
512 /* reset the jump entry 'n' of a TB so that it is not chained to
513 another TB */
514 static inline void tb_reset_jump(TranslationBlock *tb, int n)
516 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
519 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
521 CPUState *env;
522 PageDesc *p;
523 unsigned int h, n1;
524 target_ulong phys_pc;
525 TranslationBlock *tb1, *tb2;
527 /* remove the TB from the hash list */
528 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
529 h = tb_phys_hash_func(phys_pc);
530 tb_remove(&tb_phys_hash[h], tb,
531 offsetof(TranslationBlock, phys_hash_next));
533 /* remove the TB from the page list */
534 if (tb->page_addr[0] != page_addr) {
535 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
536 tb_page_remove(&p->first_tb, tb);
537 invalidate_page_bitmap(p);
539 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
540 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
541 tb_page_remove(&p->first_tb, tb);
542 invalidate_page_bitmap(p);
545 tb_invalidated_flag = 1;
547 /* remove the TB from the hash list */
548 h = tb_jmp_cache_hash_func(tb->pc);
549 for(env = first_cpu; env != NULL; env = env->next_cpu) {
550 if (env->tb_jmp_cache[h] == tb)
551 env->tb_jmp_cache[h] = NULL;
554 /* suppress this TB from the two jump lists */
555 tb_jmp_remove(tb, 0);
556 tb_jmp_remove(tb, 1);
558 /* suppress any remaining jumps to this TB */
559 tb1 = tb->jmp_first;
560 for(;;) {
561 n1 = (long)tb1 & 3;
562 if (n1 == 2)
563 break;
564 tb1 = (TranslationBlock *)((long)tb1 & ~3);
565 tb2 = tb1->jmp_next[n1];
566 tb_reset_jump(tb1, n1);
567 tb1->jmp_next[n1] = NULL;
568 tb1 = tb2;
570 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
572 tb_phys_invalidate_count++;
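/* The TB lists above use tagged pointers: the two low bits of each
   TranslationBlock pointer stored in page_next[]/jmp_next[]/jmp_first
   encode which slot (0 or 1) the link belongs to, and the value 2 marks the
   head of the circular jump list (the TB itself, as set in tb_link_phys()).
   This is why the code keeps masking with '& 3' and '& ~3' before
   dereferencing. */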
575 static inline void set_bits(uint8_t *tab, int start, int len)
577 int end, mask, end1;
579 end = start + len;
580 tab += start >> 3;
581 mask = 0xff << (start & 7);
582 if ((start & ~7) == (end & ~7)) {
583 if (start < end) {
584 mask &= ~(0xff << (end & 7));
585 *tab |= mask;
587 } else {
588 *tab++ |= mask;
589 start = (start + 8) & ~7;
590 end1 = end & ~7;
591 while (start < end1) {
592 *tab++ = 0xff;
593 start += 8;
595 if (start < end) {
596 mask = ~(0xff << (end & 7));
597 *tab |= mask;
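/* set_bits() marks the half-open bit range [start, start + len) in the byte
   array 'tab'; e.g. set_bits(bitmap, 5, 9) sets bits 5..13.  It is used
   below to record which parts of a page hold translated code. */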
602 static void build_page_bitmap(PageDesc *p)
604 int n, tb_start, tb_end;
605 TranslationBlock *tb;
607 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
608 if (!p->code_bitmap)
609 return;
610 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
612 tb = p->first_tb;
613 while (tb != NULL) {
614 n = (long)tb & 3;
615 tb = (TranslationBlock *)((long)tb & ~3);
616 /* NOTE: this is subtle as a TB may span two physical pages */
617 if (n == 0) {
618 /* NOTE: tb_end may be after the end of the page, but
619 it is not a problem */
620 tb_start = tb->pc & ~TARGET_PAGE_MASK;
621 tb_end = tb_start + tb->size;
622 if (tb_end > TARGET_PAGE_SIZE)
623 tb_end = TARGET_PAGE_SIZE;
624 } else {
625 tb_start = 0;
626 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
628 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
629 tb = tb->page_next[n];
633 #ifdef TARGET_HAS_PRECISE_SMC
635 static void tb_gen_code(CPUState *env,
636 target_ulong pc, target_ulong cs_base, int flags,
637 int cflags)
639 TranslationBlock *tb;
640 uint8_t *tc_ptr;
641 target_ulong phys_pc, phys_page2, virt_page2;
642 int code_gen_size;
644 phys_pc = get_phys_addr_code(env, pc);
645 tb = tb_alloc(pc);
646 if (!tb) {
647 /* flush must be done */
648 tb_flush(env);
649 /* cannot fail at this point */
650 tb = tb_alloc(pc);
652 tc_ptr = code_gen_ptr;
653 tb->tc_ptr = tc_ptr;
654 tb->cs_base = cs_base;
655 tb->flags = flags;
656 tb->cflags = cflags;
657 cpu_gen_code(env, tb, &code_gen_size);
658 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
660 /* check next page if needed */
661 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
662 phys_page2 = -1;
663 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
664 phys_page2 = get_phys_addr_code(env, virt_page2);
666 tb_link_phys(tb, phys_pc, phys_page2);
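/* If the generated block crosses a page boundary, both physical pages are
   recorded (phys_page2 != -1), so a write to either page invalidates the
   TB. */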
668 #endif
670 /* invalidate all TBs which intersect with the target physical page
671 starting in range [start, end). NOTE: start and end must refer to
672 the same physical page. 'is_cpu_write_access' should be true if called
673 from a real cpu write access: the virtual CPU will exit the current
674 TB if code is modified inside this TB. */
675 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
676 int is_cpu_write_access)
678 int n, current_tb_modified, current_tb_not_found, current_flags;
679 CPUState *env = cpu_single_env;
680 PageDesc *p;
681 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
682 target_ulong tb_start, tb_end;
683 target_ulong current_pc, current_cs_base;
685 p = page_find(start >> TARGET_PAGE_BITS);
686 if (!p)
687 return;
688 if (!p->code_bitmap &&
689 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
690 is_cpu_write_access) {
691 /* build code bitmap */
692 build_page_bitmap(p);
695 /* we remove all the TBs in the range [start, end) */
696 /* XXX: see if in some cases it could be faster to invalidate all the code */
697 current_tb_not_found = is_cpu_write_access;
698 current_tb_modified = 0;
699 current_tb = NULL; /* avoid warning */
700 current_pc = 0; /* avoid warning */
701 current_cs_base = 0; /* avoid warning */
702 current_flags = 0; /* avoid warning */
703 tb = p->first_tb;
704 while (tb != NULL) {
705 n = (long)tb & 3;
706 tb = (TranslationBlock *)((long)tb & ~3);
707 tb_next = tb->page_next[n];
708 /* NOTE: this is subtle as a TB may span two physical pages */
709 if (n == 0) {
710 /* NOTE: tb_end may be after the end of the page, but
711 it is not a problem */
712 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
713 tb_end = tb_start + tb->size;
714 } else {
715 tb_start = tb->page_addr[1];
716 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
718 if (!(tb_end <= start || tb_start >= end)) {
719 #ifdef TARGET_HAS_PRECISE_SMC
720 if (current_tb_not_found) {
721 current_tb_not_found = 0;
722 current_tb = NULL;
723 if (env->mem_write_pc) {
724 /* now we have a real cpu fault */
725 current_tb = tb_find_pc(env->mem_write_pc);
728 if (current_tb == tb &&
729 !(current_tb->cflags & CF_SINGLE_INSN)) {
730 /* If we are modifying the current TB, we must stop
731 its execution. We could be more precise by checking
732 that the modification is after the current PC, but it
733 would require a specialized function to partially
734 restore the CPU state */
736 current_tb_modified = 1;
737 cpu_restore_state(current_tb, env,
738 env->mem_write_pc, NULL);
739 #if defined(TARGET_I386)
740 current_flags = env->hflags;
741 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
742 current_cs_base = (target_ulong)env->segs[R_CS].base;
743 current_pc = current_cs_base + env->eip;
744 #else
745 #error unsupported CPU
746 #endif
748 #endif /* TARGET_HAS_PRECISE_SMC */
749 /* we need to do that to handle the case where a signal
750 occurs while doing tb_phys_invalidate() */
751 saved_tb = NULL;
752 if (env) {
753 saved_tb = env->current_tb;
754 env->current_tb = NULL;
756 tb_phys_invalidate(tb, -1);
757 if (env) {
758 env->current_tb = saved_tb;
759 if (env->interrupt_request && env->current_tb)
760 cpu_interrupt(env, env->interrupt_request);
763 tb = tb_next;
765 #if !defined(CONFIG_USER_ONLY)
766 /* if no code remaining, no need to continue to use slow writes */
767 if (!p->first_tb) {
768 invalidate_page_bitmap(p);
769 if (is_cpu_write_access) {
770 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
773 #endif
774 #ifdef TARGET_HAS_PRECISE_SMC
775 if (current_tb_modified) {
776 /* we generate a block containing just the instruction
777 modifying the memory. It will ensure that it cannot modify
778 itself */
779 env->current_tb = NULL;
780 tb_gen_code(env, current_pc, current_cs_base, current_flags,
781 CF_SINGLE_INSN);
782 cpu_resume_from_signal(env, NULL);
784 #endif
787 /* len must be <= 8 and start must be a multiple of len */
788 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
790 PageDesc *p;
791 int offset, b;
792 #if 0
793 if (1) {
794 if (loglevel) {
795 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
796 cpu_single_env->mem_write_vaddr, len,
797 cpu_single_env->eip,
798 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
801 #endif
802 p = page_find(start >> TARGET_PAGE_BITS);
803 if (!p)
804 return;
805 if (p->code_bitmap) {
806 offset = start & ~TARGET_PAGE_MASK;
807 b = p->code_bitmap[offset >> 3] >> (offset & 7);
808 if (b & ((1 << len) - 1))
809 goto do_invalidate;
810 } else {
811 do_invalidate:
812 tb_invalidate_phys_page_range(start, start + len, 1);
816 #if !defined(CONFIG_SOFTMMU)
817 static void tb_invalidate_phys_page(target_ulong addr,
818 unsigned long pc, void *puc)
820 int n, current_flags, current_tb_modified;
821 target_ulong current_pc, current_cs_base;
822 PageDesc *p;
823 TranslationBlock *tb, *current_tb;
824 #ifdef TARGET_HAS_PRECISE_SMC
825 CPUState *env = cpu_single_env;
826 #endif
828 addr &= TARGET_PAGE_MASK;
829 p = page_find(addr >> TARGET_PAGE_BITS);
830 if (!p)
831 return;
832 tb = p->first_tb;
833 current_tb_modified = 0;
834 current_tb = NULL;
835 current_pc = 0; /* avoid warning */
836 current_cs_base = 0; /* avoid warning */
837 current_flags = 0; /* avoid warning */
838 #ifdef TARGET_HAS_PRECISE_SMC
839 if (tb && pc != 0) {
840 current_tb = tb_find_pc(pc);
842 #endif
843 while (tb != NULL) {
844 n = (long)tb & 3;
845 tb = (TranslationBlock *)((long)tb & ~3);
846 #ifdef TARGET_HAS_PRECISE_SMC
847 if (current_tb == tb &&
848 !(current_tb->cflags & CF_SINGLE_INSN)) {
849 /* If we are modifying the current TB, we must stop
850 its execution. We could be more precise by checking
851 that the modification is after the current PC, but it
852 would require a specialized function to partially
853 restore the CPU state */
855 current_tb_modified = 1;
856 cpu_restore_state(current_tb, env, pc, puc);
857 #if defined(TARGET_I386)
858 current_flags = env->hflags;
859 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
860 current_cs_base = (target_ulong)env->segs[R_CS].base;
861 current_pc = current_cs_base + env->eip;
862 #else
863 #error unsupported CPU
864 #endif
866 #endif /* TARGET_HAS_PRECISE_SMC */
867 tb_phys_invalidate(tb, addr);
868 tb = tb->page_next[n];
870 p->first_tb = NULL;
871 #ifdef TARGET_HAS_PRECISE_SMC
872 if (current_tb_modified) {
873 /* we generate a block containing just the instruction
874 modifying the memory. It will ensure that it cannot modify
875 itself */
876 env->current_tb = NULL;
877 tb_gen_code(env, current_pc, current_cs_base, current_flags,
878 CF_SINGLE_INSN);
879 cpu_resume_from_signal(env, puc);
881 #endif
883 #endif
885 /* add the tb in the target page and protect it if necessary */
886 static inline void tb_alloc_page(TranslationBlock *tb,
887 unsigned int n, target_ulong page_addr)
889 PageDesc *p;
890 TranslationBlock *last_first_tb;
892 tb->page_addr[n] = page_addr;
893 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
894 tb->page_next[n] = p->first_tb;
895 last_first_tb = p->first_tb;
896 p->first_tb = (TranslationBlock *)((long)tb | n);
897 invalidate_page_bitmap(p);
899 #if defined(TARGET_HAS_SMC) || 1
901 #if defined(CONFIG_USER_ONLY)
902 if (p->flags & PAGE_WRITE) {
903 target_ulong addr;
904 PageDesc *p2;
905 int prot;
907 /* force the host page as non writable (writes will have a
908 page fault + mprotect overhead) */
909 page_addr &= qemu_host_page_mask;
910 prot = 0;
911 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
912 addr += TARGET_PAGE_SIZE) {
914 p2 = page_find (addr >> TARGET_PAGE_BITS);
915 if (!p2)
916 continue;
917 prot |= p2->flags;
918 p2->flags &= ~PAGE_WRITE;
919 page_get_flags(addr);
921 mprotect(g2h(page_addr), qemu_host_page_size,
922 (prot & PAGE_BITS) & ~PAGE_WRITE);
923 #ifdef DEBUG_TB_INVALIDATE
924 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
925 page_addr);
926 #endif
928 #else
929 /* if some code is already present, then the pages are already
930 protected. So we handle the case where only the first TB is
931 allocated in a physical page */
932 if (!last_first_tb) {
933 tlb_protect_code(page_addr);
935 #endif
937 #endif /* TARGET_HAS_SMC */
940 /* Allocate a new translation block. Flush the translation buffer if
941 too many translation blocks or too much generated code. */
942 TranslationBlock *tb_alloc(target_ulong pc)
944 TranslationBlock *tb;
946 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
947 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
948 return NULL;
949 tb = &tbs[nb_tbs++];
950 tb->pc = pc;
951 tb->cflags = 0;
952 return tb;
955 /* add a new TB and link it to the physical page tables. phys_page2 is
956 (-1) to indicate that only one page contains the TB. */
957 void tb_link_phys(TranslationBlock *tb,
958 target_ulong phys_pc, target_ulong phys_page2)
960 unsigned int h;
961 TranslationBlock **ptb;
963 /* add in the physical hash table */
964 h = tb_phys_hash_func(phys_pc);
965 ptb = &tb_phys_hash[h];
966 tb->phys_hash_next = *ptb;
967 *ptb = tb;
969 /* add in the page list */
970 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
971 if (phys_page2 != -1)
972 tb_alloc_page(tb, 1, phys_page2);
973 else
974 tb->page_addr[1] = -1;
976 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
977 tb->jmp_next[0] = NULL;
978 tb->jmp_next[1] = NULL;
980 /* init original jump addresses */
981 if (tb->tb_next_offset[0] != 0xffff)
982 tb_reset_jump(tb, 0);
983 if (tb->tb_next_offset[1] != 0xffff)
984 tb_reset_jump(tb, 1);
986 #ifdef DEBUG_TB_CHECK
987 tb_page_check();
988 #endif
991 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
992 tb[1].tc_ptr. Return NULL if not found */
993 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
995 int m_min, m_max, m;
996 unsigned long v;
997 TranslationBlock *tb;
999 if (nb_tbs <= 0)
1000 return NULL;
1001 if (tc_ptr < (unsigned long)code_gen_buffer ||
1002 tc_ptr >= (unsigned long)code_gen_ptr)
1003 return NULL;
1004 /* binary search (cf Knuth) */
1005 m_min = 0;
1006 m_max = nb_tbs - 1;
1007 while (m_min <= m_max) {
1008 m = (m_min + m_max) >> 1;
1009 tb = &tbs[m];
1010 v = (unsigned long)tb->tc_ptr;
1011 if (v == tc_ptr)
1012 return tb;
1013 else if (tc_ptr < v) {
1014 m_max = m - 1;
1015 } else {
1016 m_min = m + 1;
1019 return &tbs[m_max];
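/* Note: tbs[] is filled in the same order as code_gen_ptr advances, so
   tc_ptr is monotonically increasing across the array and a plain binary
   search is enough; when tc_ptr falls inside a block, &tbs[m_max] is the
   last TB whose generated code starts at or before it. */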
1022 static void tb_reset_jump_recursive(TranslationBlock *tb);
1024 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1026 TranslationBlock *tb1, *tb_next, **ptb;
1027 unsigned int n1;
1029 tb1 = tb->jmp_next[n];
1030 if (tb1 != NULL) {
1031 /* find head of list */
1032 for(;;) {
1033 n1 = (long)tb1 & 3;
1034 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1035 if (n1 == 2)
1036 break;
1037 tb1 = tb1->jmp_next[n1];
1039 /* we are now sure that tb jumps to tb1 */
1040 tb_next = tb1;
1042 /* remove tb from the jmp_first list */
1043 ptb = &tb_next->jmp_first;
1044 for(;;) {
1045 tb1 = *ptb;
1046 n1 = (long)tb1 & 3;
1047 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1048 if (n1 == n && tb1 == tb)
1049 break;
1050 ptb = &tb1->jmp_next[n1];
1052 *ptb = tb->jmp_next[n];
1053 tb->jmp_next[n] = NULL;
1055 /* suppress the jump to next tb in generated code */
1056 tb_reset_jump(tb, n);
1058 /* suppress jumps in the tb on which we could have jumped */
1059 tb_reset_jump_recursive(tb_next);
1063 static void tb_reset_jump_recursive(TranslationBlock *tb)
1065 tb_reset_jump_recursive2(tb, 0);
1066 tb_reset_jump_recursive2(tb, 1);
1069 #if defined(TARGET_HAS_ICE)
1070 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1072 target_phys_addr_t addr;
1073 target_ulong pd;
1074 ram_addr_t ram_addr;
1075 PhysPageDesc *p;
1077 addr = cpu_get_phys_page_debug(env, pc);
1078 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1079 if (!p) {
1080 pd = IO_MEM_UNASSIGNED;
1081 } else {
1082 pd = p->phys_offset;
1084 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1085 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1087 #endif
1089 /* Add a watchpoint. */
1090 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1092 int i;
1094 for (i = 0; i < env->nb_watchpoints; i++) {
1095 if (addr == env->watchpoint[i].vaddr)
1096 return 0;
1098 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1099 return -1;
1101 i = env->nb_watchpoints++;
1102 env->watchpoint[i].vaddr = addr;
1103 tlb_flush_page(env, addr);
1104 /* FIXME: This flush is needed because of the hack to make memory ops
1105 terminate the TB. It can be removed once the proper IO trap and
1106 re-execute bits are in. */
1107 tb_flush(env);
1108 return i;
1111 /* Remove a watchpoint. */
1112 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1114 int i;
1116 for (i = 0; i < env->nb_watchpoints; i++) {
1117 if (addr == env->watchpoint[i].vaddr) {
1118 env->nb_watchpoints--;
1119 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1120 tlb_flush_page(env, addr);
1121 return 0;
1124 return -1;
1127 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1128 breakpoint is reached */
1129 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1131 #if defined(TARGET_HAS_ICE)
1132 int i;
1134 for(i = 0; i < env->nb_breakpoints; i++) {
1135 if (env->breakpoints[i] == pc)
1136 return 0;
1139 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1140 return -1;
1141 env->breakpoints[env->nb_breakpoints++] = pc;
1143 if (kvm_enabled())
1144 kvm_update_debugger(env);
1146 breakpoint_invalidate(env, pc);
1147 return 0;
1148 #else
1149 return -1;
1150 #endif
1153 /* remove a breakpoint */
1154 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1156 #if defined(TARGET_HAS_ICE)
1157 int i;
1158 for(i = 0; i < env->nb_breakpoints; i++) {
1159 if (env->breakpoints[i] == pc)
1160 goto found;
1162 return -1;
1163 found:
1164 env->nb_breakpoints--;
1165 if (i < env->nb_breakpoints)
1166 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1168 if (kvm_enabled())
1169 kvm_update_debugger(env);
1171 breakpoint_invalidate(env, pc);
1172 return 0;
1173 #else
1174 return -1;
1175 #endif
1178 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1179 CPU loop after each instruction */
1180 void cpu_single_step(CPUState *env, int enabled)
1182 #if defined(TARGET_HAS_ICE)
1183 if (env->singlestep_enabled != enabled) {
1184 env->singlestep_enabled = enabled;
1185 /* must flush all the translated code to avoid inconsistencies */
1186 /* XXX: only flush what is necessary */
1187 tb_flush(env);
1189 if (kvm_enabled())
1190 kvm_update_debugger(env);
1191 #endif
1194 /* enable or disable low level logging */
1195 void cpu_set_log(int log_flags)
1197 loglevel = log_flags;
1198 if (loglevel && !logfile) {
1199 logfile = fopen(logfilename, log_append ? "a" : "w");
1200 if (!logfile) {
1201 perror(logfilename);
1202 _exit(1);
1204 #if !defined(CONFIG_SOFTMMU)
1205 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1207 static uint8_t logfile_buf[4096];
1208 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1210 #else
1211 setvbuf(logfile, NULL, _IOLBF, 0);
1212 #endif
1213 log_append = 1;
1215 if (!loglevel && logfile) {
1216 fclose(logfile);
1217 logfile = NULL;
1221 void cpu_set_log_filename(const char *filename)
1223 logfilename = strdup(filename);
1224 if (logfile) {
1225 fclose(logfile);
1226 logfile = NULL;
1228 cpu_set_log(loglevel);
1231 /* mask must never be zero, except for A20 change call */
1232 void cpu_interrupt(CPUState *env, int mask)
1234 TranslationBlock *tb;
1235 static int interrupt_lock;
1237 env->interrupt_request |= mask;
1238 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1239 kvm_update_interrupt_request(env);
1241 /* if the cpu is currently executing code, we must unlink it and
1242 all the potentially executing TB */
1243 tb = env->current_tb;
1244 if (tb && !testandset(&interrupt_lock)) {
1245 env->current_tb = NULL;
1246 tb_reset_jump_recursive(tb);
1247 interrupt_lock = 0;
1251 void cpu_reset_interrupt(CPUState *env, int mask)
1253 env->interrupt_request &= ~mask;
1256 CPULogItem cpu_log_items[] = {
1257 { CPU_LOG_TB_OUT_ASM, "out_asm",
1258 "show generated host assembly code for each compiled TB" },
1259 { CPU_LOG_TB_IN_ASM, "in_asm",
1260 "show target assembly code for each compiled TB" },
1261 { CPU_LOG_TB_OP, "op",
1262 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1263 #ifdef TARGET_I386
1264 { CPU_LOG_TB_OP_OPT, "op_opt",
1265 "show micro ops after optimization for each compiled TB" },
1266 #endif
1267 { CPU_LOG_INT, "int",
1268 "show interrupts/exceptions in short format" },
1269 { CPU_LOG_EXEC, "exec",
1270 "show trace before each executed TB (lots of logs)" },
1271 { CPU_LOG_TB_CPU, "cpu",
1272 "show CPU state before block translation" },
1273 #ifdef TARGET_I386
1274 { CPU_LOG_PCALL, "pcall",
1275 "show protected mode far calls/returns/exceptions" },
1276 #endif
1277 #ifdef DEBUG_IOPORT
1278 { CPU_LOG_IOPORT, "ioport",
1279 "show all i/o ports accesses" },
1280 #endif
1281 { 0, NULL, NULL },
1284 static int cmp1(const char *s1, int n, const char *s2)
1286 if (strlen(s2) != n)
1287 return 0;
1288 return memcmp(s1, s2, n) == 0;
1291 /* takes a comma separated list of log masks. Return 0 if error. */
1292 int cpu_str_to_log_mask(const char *str)
1294 CPULogItem *item;
1295 int mask;
1296 const char *p, *p1;
1298 p = str;
1299 mask = 0;
1300 for(;;) {
1301 p1 = strchr(p, ',');
1302 if (!p1)
1303 p1 = p + strlen(p);
1304 if(cmp1(p,p1-p,"all")) {
1305 for(item = cpu_log_items; item->mask != 0; item++) {
1306 mask |= item->mask;
1308 } else {
1309 for(item = cpu_log_items; item->mask != 0; item++) {
1310 if (cmp1(p, p1 - p, item->name))
1311 goto found;
1313 return 0;
1315 found:
1316 mask |= item->mask;
1317 if (*p1 != ',')
1318 break;
1319 p = p1 + 1;
1321 return mask;
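/* Illustrative use: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
   cpu_log_items[], and an unknown name makes the whole call return 0. */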
1324 void cpu_abort(CPUState *env, const char *fmt, ...)
1326 va_list ap;
1327 va_list ap2;
1329 va_start(ap, fmt);
1330 va_copy(ap2, ap);
1331 fprintf(stderr, "qemu: fatal: ");
1332 vfprintf(stderr, fmt, ap);
1333 fprintf(stderr, "\n");
1334 #ifdef TARGET_I386
1335 if(env->intercept & INTERCEPT_SVM_MASK) {
1336 /* most probably the virtual machine should not
1337 be shut down but rather caught by the VMM */
1338 vmexit(SVM_EXIT_SHUTDOWN, 0);
1340 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1341 #else
1342 cpu_dump_state(env, stderr, fprintf, 0);
1343 #endif
1344 if (logfile) {
1345 fprintf(logfile, "qemu: fatal: ");
1346 vfprintf(logfile, fmt, ap2);
1347 fprintf(logfile, "\n");
1348 #ifdef TARGET_I386
1349 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1350 #else
1351 cpu_dump_state(env, logfile, fprintf, 0);
1352 #endif
1353 fflush(logfile);
1354 fclose(logfile);
1356 va_end(ap2);
1357 va_end(ap);
1358 abort();
1361 CPUState *cpu_copy(CPUState *env)
1363 CPUState *new_env = cpu_init(env->cpu_model_str);
1364 /* preserve chaining and index */
1365 CPUState *next_cpu = new_env->next_cpu;
1366 int cpu_index = new_env->cpu_index;
1367 memcpy(new_env, env, sizeof(CPUState));
1368 new_env->next_cpu = next_cpu;
1369 new_env->cpu_index = cpu_index;
1370 return new_env;
1373 #if !defined(CONFIG_USER_ONLY)
1375 /* NOTE: if flush_global is true, also flush global entries (not
1376 implemented yet) */
1377 void tlb_flush(CPUState *env, int flush_global)
1379 int i;
1381 #if defined(DEBUG_TLB)
1382 printf("tlb_flush:\n");
1383 #endif
1384 /* must reset current TB so that interrupts cannot modify the
1385 links while we are modifying them */
1386 env->current_tb = NULL;
1388 for(i = 0; i < CPU_TLB_SIZE; i++) {
1389 env->tlb_table[0][i].addr_read = -1;
1390 env->tlb_table[0][i].addr_write = -1;
1391 env->tlb_table[0][i].addr_code = -1;
1392 env->tlb_table[1][i].addr_read = -1;
1393 env->tlb_table[1][i].addr_write = -1;
1394 env->tlb_table[1][i].addr_code = -1;
1395 #if (NB_MMU_MODES >= 3)
1396 env->tlb_table[2][i].addr_read = -1;
1397 env->tlb_table[2][i].addr_write = -1;
1398 env->tlb_table[2][i].addr_code = -1;
1399 #if (NB_MMU_MODES == 4)
1400 env->tlb_table[3][i].addr_read = -1;
1401 env->tlb_table[3][i].addr_write = -1;
1402 env->tlb_table[3][i].addr_code = -1;
1403 #endif
1404 #endif
1407 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1409 #if !defined(CONFIG_SOFTMMU)
1410 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1411 #endif
1412 #ifdef USE_KQEMU
1413 if (env->kqemu_enabled) {
1414 kqemu_flush(env, flush_global);
1416 #endif
1417 tlb_flush_count++;
1420 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1422 if (addr == (tlb_entry->addr_read &
1423 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1424 addr == (tlb_entry->addr_write &
1425 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1426 addr == (tlb_entry->addr_code &
1427 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1428 tlb_entry->addr_read = -1;
1429 tlb_entry->addr_write = -1;
1430 tlb_entry->addr_code = -1;
1434 void tlb_flush_page(CPUState *env, target_ulong addr)
1436 int i;
1437 TranslationBlock *tb;
1439 #if defined(DEBUG_TLB)
1440 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1441 #endif
1442 /* must reset current TB so that interrupts cannot modify the
1443 links while we are modifying them */
1444 env->current_tb = NULL;
1446 addr &= TARGET_PAGE_MASK;
1447 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1448 tlb_flush_entry(&env->tlb_table[0][i], addr);
1449 tlb_flush_entry(&env->tlb_table[1][i], addr);
1450 #if (NB_MMU_MODES >= 3)
1451 tlb_flush_entry(&env->tlb_table[2][i], addr);
1452 #if (NB_MMU_MODES == 4)
1453 tlb_flush_entry(&env->tlb_table[3][i], addr);
1454 #endif
1455 #endif
1457 /* Discard jump cache entries for any tb which might potentially
1458 overlap the flushed page. */
1459 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1460 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1462 i = tb_jmp_cache_hash_page(addr);
1463 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1465 #if !defined(CONFIG_SOFTMMU)
1466 if (addr < MMAP_AREA_END)
1467 munmap((void *)addr, TARGET_PAGE_SIZE);
1468 #endif
1469 #ifdef USE_KQEMU
1470 if (env->kqemu_enabled) {
1471 kqemu_flush_page(env, addr);
1473 #endif
1476 /* update the TLBs so that writes to code in the virtual page 'addr'
1477 can be detected */
1478 static void tlb_protect_code(ram_addr_t ram_addr)
1480 cpu_physical_memory_reset_dirty(ram_addr,
1481 ram_addr + TARGET_PAGE_SIZE,
1482 CODE_DIRTY_FLAG);
1485 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1486 tested for self modifying code */
1487 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1488 target_ulong vaddr)
1490 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1493 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1494 unsigned long start, unsigned long length)
1496 unsigned long addr;
1497 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1498 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1499 if ((addr - start) < length) {
1500 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
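/* Dirty memory tracking: cpu_physical_memory_reset_dirty() below clears the
   dirty bytes for a RAM range and rewrites the matching write TLB entries to
   IO_MEM_NOTDIRTY.  The next store to such a page therefore goes through the
   notdirty I/O handlers (see notdirty_mem_writeb()), which re-set the dirty
   flags and invalidate any translated code on the page before the fast RAM
   path is restored via tlb_set_dirty(). */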
1505 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1506 int dirty_flags)
1508 CPUState *env;
1509 unsigned long length, start1;
1510 int i, mask, len;
1511 uint8_t *p;
1513 start &= TARGET_PAGE_MASK;
1514 end = TARGET_PAGE_ALIGN(end);
1516 length = end - start;
1517 if (length == 0)
1518 return;
1519 len = length >> TARGET_PAGE_BITS;
1520 #ifdef USE_KQEMU
1521 /* XXX: should not depend on cpu context */
1522 env = first_cpu;
1523 if (env->kqemu_enabled) {
1524 ram_addr_t addr;
1525 addr = start;
1526 for(i = 0; i < len; i++) {
1527 kqemu_set_notdirty(env, addr);
1528 addr += TARGET_PAGE_SIZE;
1531 #endif
1532 mask = ~dirty_flags;
1533 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1534 for(i = 0; i < len; i++)
1535 p[i] &= mask;
1537 /* we modify the TLB cache so that the dirty bit will be set again
1538 when accessing the range */
1539 start1 = start + (unsigned long)phys_ram_base;
1540 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1541 for(i = 0; i < CPU_TLB_SIZE; i++)
1542 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1543 for(i = 0; i < CPU_TLB_SIZE; i++)
1544 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1545 #if (NB_MMU_MODES >= 3)
1546 for(i = 0; i < CPU_TLB_SIZE; i++)
1547 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1548 #if (NB_MMU_MODES == 4)
1549 for(i = 0; i < CPU_TLB_SIZE; i++)
1550 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1551 #endif
1552 #endif
1555 #if !defined(CONFIG_SOFTMMU)
1556 /* XXX: this is expensive */
1558 VirtPageDesc *p;
1559 int j;
1560 target_ulong addr;
1562 for(i = 0; i < L1_SIZE; i++) {
1563 p = l1_virt_map[i];
1564 if (p) {
1565 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1566 for(j = 0; j < L2_SIZE; j++) {
1567 if (p->valid_tag == virt_valid_tag &&
1568 p->phys_addr >= start && p->phys_addr < end &&
1569 (p->prot & PROT_WRITE)) {
1570 if (addr < MMAP_AREA_END) {
1571 mprotect((void *)addr, TARGET_PAGE_SIZE,
1572 p->prot & ~PROT_WRITE);
1575 addr += TARGET_PAGE_SIZE;
1576 p++;
1581 #endif
1584 int cpu_physical_memory_set_dirty_tracking(int enable)
1586 int r=0;
1588 if (kvm_enabled())
1589 r = kvm_physical_memory_set_dirty_tracking(enable);
1590 in_migration = enable;
1591 return r;
1594 int cpu_physical_memory_get_dirty_tracking(void)
1596 return in_migration;
1599 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1601 ram_addr_t ram_addr;
1603 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1604 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1605 tlb_entry->addend - (unsigned long)phys_ram_base;
1606 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1607 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1612 /* update the TLB according to the current state of the dirty bits */
1613 void cpu_tlb_update_dirty(CPUState *env)
1615 int i;
1616 for(i = 0; i < CPU_TLB_SIZE; i++)
1617 tlb_update_dirty(&env->tlb_table[0][i]);
1618 for(i = 0; i < CPU_TLB_SIZE; i++)
1619 tlb_update_dirty(&env->tlb_table[1][i]);
1620 #if (NB_MMU_MODES >= 3)
1621 for(i = 0; i < CPU_TLB_SIZE; i++)
1622 tlb_update_dirty(&env->tlb_table[2][i]);
1623 #if (NB_MMU_MODES == 4)
1624 for(i = 0; i < CPU_TLB_SIZE; i++)
1625 tlb_update_dirty(&env->tlb_table[3][i]);
1626 #endif
1627 #endif
1630 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1631 unsigned long start)
1633 unsigned long addr;
1634 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1635 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1636 if (addr == start) {
1637 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1642 /* update the TLB corresponding to virtual page vaddr and phys addr
1643 addr so that it is no longer dirty */
1644 static inline void tlb_set_dirty(CPUState *env,
1645 unsigned long addr, target_ulong vaddr)
1647 int i;
1649 addr &= TARGET_PAGE_MASK;
1650 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1651 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1652 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1653 #if (NB_MMU_MODES >= 3)
1654 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1655 #if (NB_MMU_MODES == 4)
1656 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1657 #endif
1658 #endif
1661 /* add a new TLB entry. At most one entry for a given virtual address
1662 is permitted. Return 0 if OK or 2 if the page could not be mapped
1663 (can only happen in non SOFTMMU mode for I/O pages or pages
1664 conflicting with the host address space). */
1665 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1666 target_phys_addr_t paddr, int prot,
1667 int mmu_idx, int is_softmmu)
1669 PhysPageDesc *p;
1670 unsigned long pd;
1671 unsigned int index;
1672 target_ulong address;
1673 target_phys_addr_t addend;
1674 int ret;
1675 CPUTLBEntry *te;
1676 int i;
1678 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1679 if (!p) {
1680 pd = IO_MEM_UNASSIGNED;
1681 } else {
1682 pd = p->phys_offset;
1684 #if defined(DEBUG_TLB)
1685 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1686 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1687 #endif
1689 ret = 0;
1690 #if !defined(CONFIG_SOFTMMU)
1691 if (is_softmmu)
1692 #endif
1694 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1695 /* IO memory case */
1696 address = vaddr | pd;
1697 addend = paddr;
1698 } else {
1699 /* standard memory */
1700 address = vaddr;
1701 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1704 /* Make accesses to pages with watchpoints go via the
1705 watchpoint trap routines. */
1706 for (i = 0; i < env->nb_watchpoints; i++) {
1707 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1708 if (address & ~TARGET_PAGE_MASK) {
1709 env->watchpoint[i].addend = 0;
1710 address = vaddr | io_mem_watch;
1711 } else {
1712 env->watchpoint[i].addend = pd - paddr +
1713 (unsigned long) phys_ram_base;
1714 /* TODO: Figure out how to make read watchpoints coexist
1715 with code. */
1716 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1721 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1722 addend -= vaddr;
1723 te = &env->tlb_table[mmu_idx][index];
1724 te->addend = addend;
1725 if (prot & PAGE_READ) {
1726 te->addr_read = address;
1727 } else {
1728 te->addr_read = -1;
1730 if (prot & PAGE_EXEC) {
1731 te->addr_code = address;
1732 } else {
1733 te->addr_code = -1;
1735 if (prot & PAGE_WRITE) {
1736 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1737 (pd & IO_MEM_ROMD)) {
1738 /* write access calls the I/O callback */
1739 te->addr_write = vaddr |
1740 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1741 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1742 !cpu_physical_memory_is_dirty(pd)) {
1743 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1744 } else {
1745 te->addr_write = address;
1747 } else {
1748 te->addr_write = -1;
1751 #if !defined(CONFIG_SOFTMMU)
1752 else {
1753 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1754 /* IO access: no mapping is done as it will be handled by the
1755 soft MMU */
1756 if (!(env->hflags & HF_SOFTMMU_MASK))
1757 ret = 2;
1758 } else {
1759 void *map_addr;
1761 if (vaddr >= MMAP_AREA_END) {
1762 ret = 2;
1763 } else {
1764 if (prot & PROT_WRITE) {
1765 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1766 #if defined(TARGET_HAS_SMC) || 1
1767 first_tb ||
1768 #endif
1769 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1770 !cpu_physical_memory_is_dirty(pd))) {
1771 /* ROM: we do as if code was inside */
1772 /* if code is present, we only map as read only and save the
1773 original mapping */
1774 VirtPageDesc *vp;
1776 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1777 vp->phys_addr = pd;
1778 vp->prot = prot;
1779 vp->valid_tag = virt_valid_tag;
1780 prot &= ~PAGE_WRITE;
1783 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1784 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1785 if (map_addr == MAP_FAILED) {
1786 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1787 paddr, vaddr);
1792 #endif
1793 return ret;
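/* TLB entry encoding: addr_read/addr_write/addr_code hold the virtual page
   address with the I/O index or flags (IO_MEM_NOTDIRTY, io_mem_watch, ...)
   in the bits below TARGET_PAGE_MASK, while addend is chosen so that
   'vaddr + addend' yields the host pointer for the fast RAM path. */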
1796 /* called from signal handler: invalidate the code and unprotect the
1797 page. Return TRUE if the fault was successfully handled. */
1798 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1800 #if !defined(CONFIG_SOFTMMU)
1801 VirtPageDesc *vp;
1803 #if defined(DEBUG_TLB)
1804 printf("page_unprotect: addr=0x%08x\n", addr);
1805 #endif
1806 addr &= TARGET_PAGE_MASK;
1808 /* if it is not mapped, no need to worry here */
1809 if (addr >= MMAP_AREA_END)
1810 return 0;
1811 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1812 if (!vp)
1813 return 0;
1814 /* NOTE: in this case, valid_tag is _not_ tested as it
1815 validates only the code TLB */
1816 if (vp->valid_tag != virt_valid_tag)
1817 return 0;
1818 if (!(vp->prot & PAGE_WRITE))
1819 return 0;
1820 #if defined(DEBUG_TLB)
1821 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1822 addr, vp->phys_addr, vp->prot);
1823 #endif
1824 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1825 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1826 (unsigned long)addr, vp->prot);
1827 /* set the dirty bit */
1828 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1829 /* flush the code inside */
1830 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1831 return 1;
1832 #else
1833 return 0;
1834 #endif
1837 #else
1839 void tlb_flush(CPUState *env, int flush_global)
1843 void tlb_flush_page(CPUState *env, target_ulong addr)
1847 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1848 target_phys_addr_t paddr, int prot,
1849 int mmu_idx, int is_softmmu)
1851 return 0;
1854 /* dump memory mappings */
1855 void page_dump(FILE *f)
1857 unsigned long start, end;
1858 int i, j, prot, prot1;
1859 PageDesc *p;
1861 fprintf(f, "%-8s %-8s %-8s %s\n",
1862 "start", "end", "size", "prot");
1863 start = -1;
1864 end = -1;
1865 prot = 0;
1866 for(i = 0; i <= L1_SIZE; i++) {
1867 if (i < L1_SIZE)
1868 p = l1_map[i];
1869 else
1870 p = NULL;
1871 for(j = 0;j < L2_SIZE; j++) {
1872 if (!p)
1873 prot1 = 0;
1874 else
1875 prot1 = p[j].flags;
1876 if (prot1 != prot) {
1877 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1878 if (start != -1) {
1879 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1880 start, end, end - start,
1881 prot & PAGE_READ ? 'r' : '-',
1882 prot & PAGE_WRITE ? 'w' : '-',
1883 prot & PAGE_EXEC ? 'x' : '-');
1885 if (prot1 != 0)
1886 start = end;
1887 else
1888 start = -1;
1889 prot = prot1;
1891 if (!p)
1892 break;
1897 int page_get_flags(target_ulong address)
1899 PageDesc *p;
1901 p = page_find(address >> TARGET_PAGE_BITS);
1902 if (!p)
1903 return 0;
1904 return p->flags;
1907 /* modify the flags of a page and invalidate the code if
1908 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1909 depending on PAGE_WRITE */
1910 void page_set_flags(target_ulong start, target_ulong end, int flags)
1912 PageDesc *p;
1913 target_ulong addr;
1915 start = start & TARGET_PAGE_MASK;
1916 end = TARGET_PAGE_ALIGN(end);
1917 if (flags & PAGE_WRITE)
1918 flags |= PAGE_WRITE_ORG;
1919 spin_lock(&tb_lock);
1920 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1921 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1922 /* if the write protection is set, then we invalidate the code
1923 inside */
1924 if (!(p->flags & PAGE_WRITE) &&
1925 (flags & PAGE_WRITE) &&
1926 p->first_tb) {
1927 tb_invalidate_phys_page(addr, 0, NULL);
1929 p->flags = flags;
1931 spin_unlock(&tb_lock);
1934 int page_check_range(target_ulong start, target_ulong len, int flags)
1936 PageDesc *p;
1937 target_ulong end;
1938 target_ulong addr;
1940 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1941 start = start & TARGET_PAGE_MASK;
1943 if( end < start )
1944 /* we've wrapped around */
1945 return -1;
1946 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1947 p = page_find(addr >> TARGET_PAGE_BITS);
1948 if( !p )
1949 return -1;
1950 if( !(p->flags & PAGE_VALID) )
1951 return -1;
1953 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1954 return -1;
1955 if (flags & PAGE_WRITE) {
1956 if (!(p->flags & PAGE_WRITE_ORG))
1957 return -1;
1958 /* unprotect the page if it was put read-only because it
1959 contains translated code */
1960 if (!(p->flags & PAGE_WRITE)) {
1961 if (!page_unprotect(addr, 0, NULL))
1962 return -1;
1964 return 0;
1967 return 0;
1970 /* called from signal handler: invalidate the code and unprotect the
1971 page. Return TRUE if the fault was successfully handled. */
1972 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1974 unsigned int page_index, prot, pindex;
1975 PageDesc *p, *p1;
1976 target_ulong host_start, host_end, addr;
1978 host_start = address & qemu_host_page_mask;
1979 page_index = host_start >> TARGET_PAGE_BITS;
1980 p1 = page_find(page_index);
1981 if (!p1)
1982 return 0;
1983 host_end = host_start + qemu_host_page_size;
1984 p = p1;
1985 prot = 0;
1986 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1987 prot |= p->flags;
1988 p++;
1990 /* if the page was really writable, then we change its
1991 protection back to writable */
1992 if (prot & PAGE_WRITE_ORG) {
1993 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1994 if (!(p1[pindex].flags & PAGE_WRITE)) {
1995 mprotect((void *)g2h(host_start), qemu_host_page_size,
1996 (prot & PAGE_BITS) | PAGE_WRITE);
1997 p1[pindex].flags |= PAGE_WRITE;
1998 /* and since the content will be modified, we must invalidate
1999 the corresponding translated code. */
2000 tb_invalidate_phys_page(address, pc, puc);
2001 #ifdef DEBUG_TB_CHECK
2002 tb_invalidate_check(address);
2003 #endif
2004 return 1;
2007 return 0;
2010 static inline void tlb_set_dirty(CPUState *env,
2011 unsigned long addr, target_ulong vaddr)
2014 #endif /* defined(CONFIG_USER_ONLY) */
2016 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2017 int memory);
2018 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2019 int orig_memory);
2020 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2021 need_subpage) \
2022 do { \
2023 if (addr > start_addr) \
2024 start_addr2 = 0; \
2025 else { \
2026 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2027 if (start_addr2 > 0) \
2028 need_subpage = 1; \
2031 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2032 end_addr2 = TARGET_PAGE_SIZE - 1; \
2033 else { \
2034 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2035 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2036 need_subpage = 1; \
2038 } while (0)
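/* CHECK_SUBPAGE computes, for the target page containing 'addr', the byte
   range [start_addr2, end_addr2] inside that page which the current
   registration actually covers, and sets need_subpage when that range is
   smaller than the whole page. */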
2040 /* register physical memory. 'size' must be a multiple of the target
2041 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2042 io memory page */
2043 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2044 unsigned long size,
2045 unsigned long phys_offset)
2047 target_phys_addr_t addr, end_addr;
2048 PhysPageDesc *p;
2049 CPUState *env;
2050 unsigned long orig_size = size;
2051 void *subpage;
2053 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2054 end_addr = start_addr + (target_phys_addr_t)size;
2055 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2056 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2057 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2058 unsigned long orig_memory = p->phys_offset;
2059 target_phys_addr_t start_addr2, end_addr2;
2060 int need_subpage = 0;
2062 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2063 need_subpage);
2064 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2065 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2066 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2067 &p->phys_offset, orig_memory);
2068 } else {
2069 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2070 >> IO_MEM_SHIFT];
2072 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2073 } else {
2074 p->phys_offset = phys_offset;
2075 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2076 (phys_offset & IO_MEM_ROMD))
2077 phys_offset += TARGET_PAGE_SIZE;
2079 } else {
2080 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2081 p->phys_offset = phys_offset;
2082 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2083 (phys_offset & IO_MEM_ROMD))
2084 phys_offset += TARGET_PAGE_SIZE;
2085 else {
2086 target_phys_addr_t start_addr2, end_addr2;
2087 int need_subpage = 0;
2089 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2090 end_addr2, need_subpage);
2092 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2093 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2094 &p->phys_offset, IO_MEM_UNASSIGNED);
2095 subpage_register(subpage, start_addr2, end_addr2,
2096 phys_offset);
2102 /* since each CPU stores ram addresses in its TLB cache, we must
2103 reset the modified entries */
2104 /* XXX: slow ! */
2105 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2106 tlb_flush(env, 1);
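#if 0
/* Illustrative sketch only (compiled out, not part of the original
   code): how a board model typically maps guest RAM and an MMIO
   region with cpu_register_physical_memory().  The parameters
   example_ram_offset and example_io_index, and the addresses used,
   are assumptions for illustration; a real caller obtains them from
   qemu_ram_alloc() and cpu_register_io_memory(), both defined later
   in this file. */
static void example_map_memory(ram_addr_t example_ram_offset,
                               int example_io_index)
{
    /* 16MB of RAM at guest physical address 0; IO_MEM_RAM is 0, so
       the phys_offset is simply the RAM allocation offset */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 example_ram_offset | IO_MEM_RAM);

    /* one page of MMIO; the low bits of phys_offset select the
       registered I/O handlers */
    cpu_register_physical_memory(0xe0000000, TARGET_PAGE_SIZE,
                                 example_io_index);
}
#endif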
2110 /* XXX: temporary until new memory mapping API */
2111 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2113 PhysPageDesc *p;
2115 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2116 if (!p)
2117 return IO_MEM_UNASSIGNED;
2118 return p->phys_offset;
2121 /* XXX: better than nothing */
2122 ram_addr_t qemu_ram_alloc(unsigned long size)
2124 ram_addr_t addr;
2125 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2126 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
2127 size, (unsigned long)phys_ram_size);
2128 abort();
2130 addr = phys_ram_alloc_offset;
2131 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2132 return addr;
2135 void qemu_ram_free(ram_addr_t addr)
2139 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2141 #ifdef DEBUG_UNASSIGNED
2142 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2143 #endif
2144 #ifdef TARGET_SPARC
2145 do_unassigned_access(addr, 0, 0, 0);
2146 #elif TARGET_CRIS
2147 do_unassigned_access(addr, 0, 0, 0);
2148 #endif
2149 return 0;
2152 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2154 #ifdef DEBUG_UNASSIGNED
2155 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2156 #endif
2157 #ifdef TARGET_SPARC
2158 do_unassigned_access(addr, 1, 0, 0);
2159 #elif TARGET_CRIS
2160 do_unassigned_access(addr, 1, 0, 0);
2161 #endif
2164 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2165 unassigned_mem_readb,
2166 unassigned_mem_readb,
2167 unassigned_mem_readb,
2170 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2171 unassigned_mem_writeb,
2172 unassigned_mem_writeb,
2173 unassigned_mem_writeb,
2176 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2178 unsigned long ram_addr;
2179 int dirty_flags;
2180 ram_addr = addr - (unsigned long)phys_ram_base;
2181 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2182 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2183 #if !defined(CONFIG_USER_ONLY)
2184 tb_invalidate_phys_page_fast(ram_addr, 1);
2185 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2186 #endif
2188 stb_p((uint8_t *)(long)addr, val);
2189 #ifdef USE_KQEMU
2190 if (cpu_single_env->kqemu_enabled &&
2191 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2192 kqemu_modify_page(cpu_single_env, ram_addr);
2193 #endif
2194 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2195 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2196 /* we remove the notdirty callback only if the code has been
2197 flushed */
2198 if (dirty_flags == 0xff)
2199 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2202 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2204 unsigned long ram_addr;
2205 int dirty_flags;
2206 ram_addr = addr - (unsigned long)phys_ram_base;
2207 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2208 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2209 #if !defined(CONFIG_USER_ONLY)
2210 tb_invalidate_phys_page_fast(ram_addr, 2);
2211 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2212 #endif
2214 stw_p((uint8_t *)(long)addr, val);
2215 #ifdef USE_KQEMU
2216 if (cpu_single_env->kqemu_enabled &&
2217 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2218 kqemu_modify_page(cpu_single_env, ram_addr);
2219 #endif
2220 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2221 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2222 /* we remove the notdirty callback only if the code has been
2223 flushed */
2224 if (dirty_flags == 0xff)
2225 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2228 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2230 unsigned long ram_addr;
2231 int dirty_flags;
2232 ram_addr = addr - (unsigned long)phys_ram_base;
2233 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2234 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2235 #if !defined(CONFIG_USER_ONLY)
2236 tb_invalidate_phys_page_fast(ram_addr, 4);
2237 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2238 #endif
2240 stl_p((uint8_t *)(long)addr, val);
2241 #ifdef USE_KQEMU
2242 if (cpu_single_env->kqemu_enabled &&
2243 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2244 kqemu_modify_page(cpu_single_env, ram_addr);
2245 #endif
2246 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2247 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2248 /* we remove the notdirty callback only if the code has been
2249 flushed */
2250 if (dirty_flags == 0xff)
2251 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2254 static CPUReadMemoryFunc *error_mem_read[3] = {
2255 NULL, /* never used */
2256 NULL, /* never used */
2257 NULL, /* never used */
2260 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2261 notdirty_mem_writeb,
2262 notdirty_mem_writew,
2263 notdirty_mem_writel,
2266 #if defined(CONFIG_SOFTMMU)
2267 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2268 so these check for a hit then pass through to the normal out-of-line
2269 phys routines. */
2270 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2272 return ldub_phys(addr);
2275 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2277 return lduw_phys(addr);
2280 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2282 return ldl_phys(addr);
2285 /* Generate a debug exception if a watchpoint has been hit.
2286 Returns the real physical address of the access. addr will be a host
2287 address in case of a RAM location. */
2288 static target_ulong check_watchpoint(target_phys_addr_t addr)
2290 CPUState *env = cpu_single_env;
2291 target_ulong watch;
2292 target_ulong retaddr;
2293 int i;
2295 retaddr = addr;
2296 for (i = 0; i < env->nb_watchpoints; i++) {
2297 watch = env->watchpoint[i].vaddr;
2298 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2299 retaddr = addr - env->watchpoint[i].addend;
2300 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2301 cpu_single_env->watchpoint_hit = i + 1;
2302 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2303 break;
2307 return retaddr;
2310 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2311 uint32_t val)
2313 addr = check_watchpoint(addr);
2314 stb_phys(addr, val);
2317 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2318 uint32_t val)
2320 addr = check_watchpoint(addr);
2321 stw_phys(addr, val);
2324 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2325 uint32_t val)
2327 addr = check_watchpoint(addr);
2328 stl_phys(addr, val);
2331 static CPUReadMemoryFunc *watch_mem_read[3] = {
2332 watch_mem_readb,
2333 watch_mem_readw,
2334 watch_mem_readl,
2337 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2338 watch_mem_writeb,
2339 watch_mem_writew,
2340 watch_mem_writel,
2342 #endif
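#if 0
/* Illustrative sketch only (compiled out, not part of the original
   code): arming a watchpoint that the watch_mem_* handlers above will
   catch.  It assumes the two-argument cpu_watchpoint_insert()/
   cpu_watchpoint_remove() interface used by this version of exec.c;
   treat the exact signatures as an assumption. */
static void example_watch_address(CPUState *env, target_ulong vaddr)
{
    if (cpu_watchpoint_insert(env, vaddr) < 0)
        return;                 /* watchpoint table is full */
    /* ... run the guest: a hit sets env->watchpoint_hit and raises
       CPU_INTERRUPT_DEBUG via check_watchpoint() ... */
    cpu_watchpoint_remove(env, vaddr);
}
#endif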
2344 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2345 unsigned int len)
2347 uint32_t ret;
2348 unsigned int idx;
2350 idx = SUBPAGE_IDX(addr - mmio->base);
2351 #if defined(DEBUG_SUBPAGE)
2352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2353 mmio, len, addr, idx);
2354 #endif
2355 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2357 return ret;
2360 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2361 uint32_t value, unsigned int len)
2363 unsigned int idx;
2365 idx = SUBPAGE_IDX(addr - mmio->base);
2366 #if defined(DEBUG_SUBPAGE)
2367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2368 mmio, len, addr, idx, value);
2369 #endif
2370 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2373 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377 #endif
2379 return subpage_readlen(opaque, addr, 0);
2382 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2383 uint32_t value)
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387 #endif
2388 subpage_writelen(opaque, addr, value, 0);
2391 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2393 #if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395 #endif
2397 return subpage_readlen(opaque, addr, 1);
2400 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2401 uint32_t value)
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405 #endif
2406 subpage_writelen(opaque, addr, value, 1);
2409 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2411 #if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2413 #endif
2415 return subpage_readlen(opaque, addr, 2);
2418 static void subpage_writel (void *opaque,
2419 target_phys_addr_t addr, uint32_t value)
2421 #if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2423 #endif
2424 subpage_writelen(opaque, addr, value, 2);
2427 static CPUReadMemoryFunc *subpage_read[] = {
2428 &subpage_readb,
2429 &subpage_readw,
2430 &subpage_readl,
2433 static CPUWriteMemoryFunc *subpage_write[] = {
2434 &subpage_writeb,
2435 &subpage_writew,
2436 &subpage_writel,
2439 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2440 int memory)
2442 int idx, eidx;
2443 unsigned int i;
2445 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2446 return -1;
2447 idx = SUBPAGE_IDX(start);
2448 eidx = SUBPAGE_IDX(end);
2449 #if defined(DEBUG_SUBPAGE)
2450 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2451 mmio, start, end, idx, eidx, memory);
2452 #endif
2453 memory >>= IO_MEM_SHIFT;
2454 for (; idx <= eidx; idx++) {
2455 for (i = 0; i < 4; i++) {
2456 if (io_mem_read[memory][i]) {
2457 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2458 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2460 if (io_mem_write[memory][i]) {
2461 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2462 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2467 return 0;
2470 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2471 int orig_memory)
2473 subpage_t *mmio;
2474 int subpage_memory;
2476 mmio = qemu_mallocz(sizeof(subpage_t));
2477 if (mmio != NULL) {
2478 mmio->base = base;
2479 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2482 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2483 #endif
2484 *phys = subpage_memory | IO_MEM_SUBPAGE;
2485 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2488 return mmio;
2491 static void io_mem_init(void)
2493 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2494 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2495 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2496 io_mem_nb = 5;
2498 #if defined(CONFIG_SOFTMMU)
2499 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2500 watch_mem_write, NULL);
2501 #endif
2502 /* alloc dirty bits array */
2503 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2504 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2507 /* mem_read and mem_write are arrays of functions containing the
2508 handlers for byte (index 0), word (index 1) and dword (index 2)
2509 accesses. Handlers can be omitted with a NULL function pointer.
2510 The registered functions may be modified dynamically later.
2511 If io_index is non-zero, the corresponding I/O zone is
2512 modified. If it is zero, a new I/O zone is allocated. The return
2513 value can be used with cpu_register_physical_memory(); -1 is
2514 returned on error. */
2515 int cpu_register_io_memory(int io_index,
2516 CPUReadMemoryFunc **mem_read,
2517 CPUWriteMemoryFunc **mem_write,
2518 void *opaque)
2520 int i, subwidth = 0;
2522 if (io_index <= 0) {
2523 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2524 return -1;
2525 io_index = io_mem_nb++;
2526 } else {
2527 if (io_index >= IO_MEM_NB_ENTRIES)
2528 return -1;
2531 for(i = 0;i < 3; i++) {
2532 if (!mem_read[i] || !mem_write[i])
2533 subwidth = IO_MEM_SUBWIDTH;
2534 io_mem_read[io_index][i] = mem_read[i];
2535 io_mem_write[io_index][i] = mem_write[i];
2537 io_mem_opaque[io_index] = opaque;
2538 return (io_index << IO_MEM_SHIFT) | subwidth;
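#if 0
/* Illustrative sketch only (compiled out, not part of the original
   code): registering a small MMIO region with cpu_register_io_memory()
   and mapping it into the guest physical address space.  All
   example_* names and the address 0xe0000000 are assumptions for
   illustration. */
static uint32_t example_mmio_read_any(void *opaque, target_phys_addr_t addr)
{
    return 0x42;                                /* dummy device value */
}

static void example_mmio_write_any(void *opaque, target_phys_addr_t addr,
                                   uint32_t val)
{
    /* device side effects would go here */
}

/* one handler per access size: byte, word, dword */
static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_read_any,
    example_mmio_read_any,
    example_mmio_read_any,
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_write_any,
    example_mmio_write_any,
    example_mmio_write_any,
};

static void example_register_mmio(void)
{
    int phys_offset;

    /* io_index 0 asks for a fresh I/O slot; the returned value is the
       phys_offset to pass to cpu_register_physical_memory() */
    phys_offset = cpu_register_io_memory(0, example_mmio_read,
                                         example_mmio_write, NULL);
    if (phys_offset != -1)
        cpu_register_physical_memory(0xe0000000, TARGET_PAGE_SIZE,
                                     phys_offset);
}
#endif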
2541 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2543 return io_mem_write[io_index >> IO_MEM_SHIFT];
2546 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2548 return io_mem_read[io_index >> IO_MEM_SHIFT];
2551 /* physical memory access (slow version, mainly for debug) */
2552 #if defined(CONFIG_USER_ONLY)
2553 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2554 int len, int is_write)
2556 int l, flags;
2557 target_ulong page;
2558 void * p;
2560 while (len > 0) {
2561 page = addr & TARGET_PAGE_MASK;
2562 l = (page + TARGET_PAGE_SIZE) - addr;
2563 if (l > len)
2564 l = len;
2565 flags = page_get_flags(page);
2566 if (!(flags & PAGE_VALID))
2567 return;
2568 if (is_write) {
2569 if (!(flags & PAGE_WRITE))
2570 return;
2571 /* XXX: this code should not depend on lock_user */
2572 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2573 /* FIXME - should this return an error rather than just fail? */
2574 return;
2575 memcpy(p, buf, len);
2576 unlock_user(p, addr, len);
2577 } else {
2578 if (!(flags & PAGE_READ))
2579 return;
2580 /* XXX: this code should not depend on lock_user */
2581 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2582 /* FIXME - should this return an error rather than just fail? */
2583 return;
2584 memcpy(buf, p, len);
2585 unlock_user(p, addr, 0);
2587 len -= l;
2588 buf += l;
2589 addr += l;
2593 #else
2594 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2595 int len, int is_write)
2597 int l, io_index;
2598 uint8_t *ptr;
2599 uint32_t val;
2600 target_phys_addr_t page;
2601 unsigned long pd;
2602 PhysPageDesc *p;
2604 while (len > 0) {
2605 page = addr & TARGET_PAGE_MASK;
2606 l = (page + TARGET_PAGE_SIZE) - addr;
2607 if (l > len)
2608 l = len;
2609 p = phys_page_find(page >> TARGET_PAGE_BITS);
2610 if (!p) {
2611 pd = IO_MEM_UNASSIGNED;
2612 } else {
2613 pd = p->phys_offset;
2616 if (is_write) {
2617 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2618 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2619 /* XXX: could force cpu_single_env to NULL to avoid
2620 potential bugs */
2621 if (l >= 4 && ((addr & 3) == 0)) {
2622 /* 32 bit write access */
2623 val = ldl_p(buf);
2624 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2625 l = 4;
2626 } else if (l >= 2 && ((addr & 1) == 0)) {
2627 /* 16 bit write access */
2628 val = lduw_p(buf);
2629 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2630 l = 2;
2631 } else {
2632 /* 8 bit write access */
2633 val = ldub_p(buf);
2634 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2635 l = 1;
2637 } else {
2638 unsigned long addr1;
2639 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2640 /* RAM case */
2641 ptr = phys_ram_base + addr1;
2642 memcpy(ptr, buf, l);
2643 if (!cpu_physical_memory_is_dirty(addr1)) {
2644 /* invalidate code */
2645 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2646 /* set dirty bit */
2647 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2648 (0xff & ~CODE_DIRTY_FLAG);
2650 /* qemu doesn't execute guest code directly, but kvm does,
2651 therefore flush the instruction caches */
2652 if (kvm_enabled())
2653 flush_icache_range((unsigned long)ptr,
2654 ((unsigned long)ptr)+l);
2656 } else {
2657 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2658 !(pd & IO_MEM_ROMD)) {
2659 /* I/O case */
2660 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2661 if (l >= 4 && ((addr & 3) == 0)) {
2662 /* 32 bit read access */
2663 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2664 stl_p(buf, val);
2665 l = 4;
2666 } else if (l >= 2 && ((addr & 1) == 0)) {
2667 /* 16 bit read access */
2668 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2669 stw_p(buf, val);
2670 l = 2;
2671 } else {
2672 /* 8 bit read access */
2673 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2674 stb_p(buf, val);
2675 l = 1;
2677 } else {
2678 /* RAM case */
2679 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2680 (addr & ~TARGET_PAGE_MASK);
2681 memcpy(buf, ptr, l);
2684 len -= l;
2685 buf += l;
2686 addr += l;
2690 /* used for ROM loading: can write to both RAM and ROM */
2691 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2692 const uint8_t *buf, int len)
2694 int l;
2695 uint8_t *ptr;
2696 target_phys_addr_t page;
2697 unsigned long pd;
2698 PhysPageDesc *p;
2700 while (len > 0) {
2701 page = addr & TARGET_PAGE_MASK;
2702 l = (page + TARGET_PAGE_SIZE) - addr;
2703 if (l > len)
2704 l = len;
2705 p = phys_page_find(page >> TARGET_PAGE_BITS);
2706 if (!p) {
2707 pd = IO_MEM_UNASSIGNED;
2708 } else {
2709 pd = p->phys_offset;
2712 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2713 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2714 !(pd & IO_MEM_ROMD)) {
2715 /* do nothing */
2716 } else {
2717 unsigned long addr1;
2718 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2719 /* ROM/RAM case */
2720 ptr = phys_ram_base + addr1;
2721 memcpy(ptr, buf, l);
2723 len -= l;
2724 buf += l;
2725 addr += l;
2730 /* warning: addr must be aligned */
2731 uint32_t ldl_phys(target_phys_addr_t addr)
2733 int io_index;
2734 uint8_t *ptr;
2735 uint32_t val;
2736 unsigned long pd;
2737 PhysPageDesc *p;
2739 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2740 if (!p) {
2741 pd = IO_MEM_UNASSIGNED;
2742 } else {
2743 pd = p->phys_offset;
2746 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2747 !(pd & IO_MEM_ROMD)) {
2748 /* I/O case */
2749 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2750 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2751 } else {
2752 /* RAM case */
2753 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2754 (addr & ~TARGET_PAGE_MASK);
2755 val = ldl_p(ptr);
2757 return val;
2760 /* warning: addr must be aligned */
2761 uint64_t ldq_phys(target_phys_addr_t addr)
2763 int io_index;
2764 uint8_t *ptr;
2765 uint64_t val;
2766 unsigned long pd;
2767 PhysPageDesc *p;
2769 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2770 if (!p) {
2771 pd = IO_MEM_UNASSIGNED;
2772 } else {
2773 pd = p->phys_offset;
2776 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2777 !(pd & IO_MEM_ROMD)) {
2778 /* I/O case */
2779 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2780 #ifdef TARGET_WORDS_BIGENDIAN
2781 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2782 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2783 #else
2784 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2785 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2786 #endif
2787 } else {
2788 /* RAM case */
2789 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2790 (addr & ~TARGET_PAGE_MASK);
2791 val = ldq_p(ptr);
2793 return val;
2796 /* XXX: optimize */
2797 uint32_t ldub_phys(target_phys_addr_t addr)
2799 uint8_t val;
2800 cpu_physical_memory_read(addr, &val, 1);
2801 return val;
2804 /* XXX: optimize */
2805 uint32_t lduw_phys(target_phys_addr_t addr)
2807 uint16_t val;
2808 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2809 return tswap16(val);
2812 #ifdef __GNUC__
2813 #define likely(x) __builtin_expect(!!(x), 1)
2814 #define unlikely(x) __builtin_expect(!!(x), 0)
2815 #else
2816 #define likely(x) x
2817 #define unlikely(x) x
2818 #endif
2820 /* warning: addr must be aligned. The RAM page is not marked as dirty
2821 and the code inside is not invalidated. This is useful if the dirty
2822 bits are used to track modified PTEs. */
2823 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2825 int io_index;
2826 uint8_t *ptr;
2827 unsigned long pd;
2828 PhysPageDesc *p;
2830 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2831 if (!p) {
2832 pd = IO_MEM_UNASSIGNED;
2833 } else {
2834 pd = p->phys_offset;
2837 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2838 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2839 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2840 } else {
2841 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2842 ptr = phys_ram_base + addr1;
2843 stl_p(ptr, val);
2845 if (unlikely(in_migration)) {
2846 if (!cpu_physical_memory_is_dirty(addr1)) {
2847 /* invalidate code */
2848 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2849 /* set dirty bit */
2850 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2851 (0xff & ~CODE_DIRTY_FLAG);
2857 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2859 int io_index;
2860 uint8_t *ptr;
2861 unsigned long pd;
2862 PhysPageDesc *p;
2864 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2865 if (!p) {
2866 pd = IO_MEM_UNASSIGNED;
2867 } else {
2868 pd = p->phys_offset;
2871 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2872 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2873 #ifdef TARGET_WORDS_BIGENDIAN
2874 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2875 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2876 #else
2877 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2879 #endif
2880 } else {
2881 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2882 (addr & ~TARGET_PAGE_MASK);
2883 stq_p(ptr, val);
2887 /* warning: addr must be aligned */
2888 void stl_phys(target_phys_addr_t addr, uint32_t val)
2890 int io_index;
2891 uint8_t *ptr;
2892 unsigned long pd;
2893 PhysPageDesc *p;
2895 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2896 if (!p) {
2897 pd = IO_MEM_UNASSIGNED;
2898 } else {
2899 pd = p->phys_offset;
2902 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2903 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2904 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2905 } else {
2906 unsigned long addr1;
2907 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2908 /* RAM case */
2909 ptr = phys_ram_base + addr1;
2910 stl_p(ptr, val);
2911 if (!cpu_physical_memory_is_dirty(addr1)) {
2912 /* invalidate code */
2913 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2914 /* set dirty bit */
2915 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2916 (0xff & ~CODE_DIRTY_FLAG);
2921 /* XXX: optimize */
2922 void stb_phys(target_phys_addr_t addr, uint32_t val)
2924 uint8_t v = val;
2925 cpu_physical_memory_write(addr, &v, 1);
2928 /* XXX: optimize */
2929 void stw_phys(target_phys_addr_t addr, uint32_t val)
2931 uint16_t v = tswap16(val);
2932 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2935 /* XXX: optimize */
2936 void stq_phys(target_phys_addr_t addr, uint64_t val)
2938 val = tswap64(val);
2939 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
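#if 0
/* Illustrative sketch only (compiled out, not part of the original
   code): typical users of the slow physical accessors above.  A
   device model doing "DMA" uses cpu_physical_memory_write() so that
   dirty tracking and TB invalidation happen, while an MMU helper that
   merely sets accessed/dirty bits in a guest PTE uses
   stl_phys_notdirty() so the write is not mistaken for a guest
   modification.  The names, addresses and the 0x40 bit below are
   assumptions for illustration. */
static void example_dma_write(target_phys_addr_t guest_addr,
                              const uint8_t *data, int len)
{
    cpu_physical_memory_write(guest_addr, data, len);
}

static void example_set_pte_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x40);    /* example dirty bit */
}
#endif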
2942 #endif
2944 /* virtual memory access for debug */
2945 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2946 uint8_t *buf, int len, int is_write)
2948 int l;
2949 target_phys_addr_t phys_addr;
2950 target_ulong page;
2952 while (len > 0) {
2953 page = addr & TARGET_PAGE_MASK;
2954 phys_addr = cpu_get_phys_page_debug(env, page);
2955 /* if no physical page mapped, return an error */
2956 if (phys_addr == -1)
2957 return -1;
2958 l = (page + TARGET_PAGE_SIZE) - addr;
2959 if (l > len)
2960 l = len;
2961 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2962 buf, l, is_write);
2963 len -= l;
2964 buf += l;
2965 addr += l;
2967 return 0;
2970 void dump_exec_info(FILE *f,
2971 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2973 int i, target_code_size, max_target_code_size;
2974 int direct_jmp_count, direct_jmp2_count, cross_page;
2975 TranslationBlock *tb;
2977 target_code_size = 0;
2978 max_target_code_size = 0;
2979 cross_page = 0;
2980 direct_jmp_count = 0;
2981 direct_jmp2_count = 0;
2982 for(i = 0; i < nb_tbs; i++) {
2983 tb = &tbs[i];
2984 target_code_size += tb->size;
2985 if (tb->size > max_target_code_size)
2986 max_target_code_size = tb->size;
2987 if (tb->page_addr[1] != -1)
2988 cross_page++;
2989 if (tb->tb_next_offset[0] != 0xffff) {
2990 direct_jmp_count++;
2991 if (tb->tb_next_offset[1] != 0xffff) {
2992 direct_jmp2_count++;
2996 /* XXX: avoid using doubles ? */
2997 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2998 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2999 nb_tbs ? target_code_size / nb_tbs : 0,
3000 max_target_code_size);
3001 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3002 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3003 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3004 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3005 cross_page,
3006 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3007 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3008 direct_jmp_count,
3009 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3010 direct_jmp2_count,
3011 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3012 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3013 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3014 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3017 #if !defined(CONFIG_USER_ONLY)
3019 #define MMUSUFFIX _cmmu
3020 #define GETPC() NULL
3021 #define env cpu_single_env
3022 #define SOFTMMU_CODE_ACCESS
3024 #define SHIFT 0
3025 #include "softmmu_template.h"
3027 #define SHIFT 1
3028 #include "softmmu_template.h"
3030 #define SHIFT 2
3031 #include "softmmu_template.h"
3033 #define SHIFT 3
3034 #include "softmmu_template.h"
3036 #undef env
3038 #endif