Fix configurations with more than 4GB of memory
[qemu-kvm/fedora.git] / exec.c
blob: 3e588d5a1aa1513253d640616eb647cf8da79602
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #ifdef USE_KVM
38 #include "qemu-kvm.h"
39 #endif
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
61 /* threshold to flush the translated code buffer */
62 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif USE_KQEMU
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
81 #elif TARGET_X86_64
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #else
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
87 #ifdef USE_KVM
88 extern int kvm_allowed;
89 extern kvm_context_t kvm_context;
90 #endif
92 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
93 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
94 int nb_tbs;
95 /* any access to the tbs or the page table must use this lock */
96 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
98 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
99 uint8_t *code_gen_ptr;
101 ram_addr_t phys_ram_size;
102 int phys_ram_fd;
103 uint8_t *phys_ram_base;
104 uint8_t *phys_ram_dirty;
105 uint8_t *bios_mem;
106 static int in_migration;
107 static ram_addr_t phys_ram_alloc_offset = 0;
109 CPUState *first_cpu;
110 /* current CPU in the current thread. It is only valid inside
111 cpu_exec() */
112 CPUState *cpu_single_env;
114 typedef struct PageDesc {
115 /* list of TBs intersecting this ram page */
116 TranslationBlock *first_tb;
117 /* in order to optimize self modifying code, we count the number
118    of write accesses to a given page; above a threshold a bitmap is used */
119 unsigned int code_write_count;
120 uint8_t *code_bitmap;
121 #if defined(CONFIG_USER_ONLY)
122 unsigned long flags;
123 #endif
124 } PageDesc;
126 typedef struct PhysPageDesc {
127 /* offset in host memory of the page + io_index in the low 12 bits */
128 ram_addr_t phys_offset;
129 } PhysPageDesc;
131 #define L2_BITS 10
132 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
133 /* XXX: this is a temporary hack for alpha target.
134  * In the future, this is to be replaced by a multi-level table
135  * to actually be able to handle the complete 64-bit address space.
136  */
137 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
138 #else
139 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
140 #endif
142 #define L1_SIZE (1 << L1_BITS)
143 #define L2_SIZE (1 << L2_BITS)
145 static void io_mem_init(void);
147 unsigned long qemu_real_host_page_size;
148 unsigned long qemu_host_page_bits;
149 unsigned long qemu_host_page_size;
150 unsigned long qemu_host_page_mask;
152 /* XXX: for system emulation, it could just be an array */
153 static PageDesc *l1_map[L1_SIZE];
154 PhysPageDesc **l1_phys_map;
156 /* io memory support */
157 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
158 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
159 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
160 static int io_mem_nb;
161 #if defined(CONFIG_SOFTMMU)
162 static int io_mem_watch;
163 #endif
165 /* log support */
166 char *logfilename = "/tmp/qemu.log";
167 FILE *logfile;
168 int loglevel;
169 static int log_append = 0;
171 /* statistics */
172 static int tlb_flush_count;
173 static int tb_flush_count;
174 static int tb_phys_invalidate_count;
176 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
177 typedef struct subpage_t {
178 target_phys_addr_t base;
179 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
180 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
181 void *opaque[TARGET_PAGE_SIZE];
182 } subpage_t;
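/* initialise the host page size globals, make the code generation
   buffer executable and allocate the l1 physical page map */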
184 static void page_init(void)
186 /* NOTE: we can always suppose that qemu_host_page_size >=
187 TARGET_PAGE_SIZE */
188 #ifdef _WIN32
190 SYSTEM_INFO system_info;
191 DWORD old_protect;
193 GetSystemInfo(&system_info);
194 qemu_real_host_page_size = system_info.dwPageSize;
196 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
197 PAGE_EXECUTE_READWRITE, &old_protect);
199 #else
200 qemu_real_host_page_size = getpagesize();
202 unsigned long start, end;
204 start = (unsigned long)code_gen_buffer;
205 start &= ~(qemu_real_host_page_size - 1);
207 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
208 end += qemu_real_host_page_size - 1;
209 end &= ~(qemu_real_host_page_size - 1);
211 mprotect((void *)start, end - start,
212 PROT_READ | PROT_WRITE | PROT_EXEC);
214 #endif
216 if (qemu_host_page_size == 0)
217 qemu_host_page_size = qemu_real_host_page_size;
218 if (qemu_host_page_size < TARGET_PAGE_SIZE)
219 qemu_host_page_size = TARGET_PAGE_SIZE;
220 qemu_host_page_bits = 0;
221 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
222 qemu_host_page_bits++;
223 qemu_host_page_mask = ~(qemu_host_page_size - 1);
224 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
225 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
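/* return the PageDesc for a given target page index, allocating and
   zeroing the second level table if it does not exist yet */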
228 static inline PageDesc *page_find_alloc(unsigned int index)
230 PageDesc **lp, *p;
232 lp = &l1_map[index >> L2_BITS];
233 p = *lp;
234 if (!p) {
235 /* allocate if not found */
236 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
237 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
238 *lp = p;
240 return p + (index & (L2_SIZE - 1));
243 static inline PageDesc *page_find(unsigned int index)
245 PageDesc *p;
247 p = l1_map[index >> L2_BITS];
248 if (!p)
249 return 0;
250 return p + (index & (L2_SIZE - 1));
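/* look up the PhysPageDesc of a physical page index. If 'alloc' is set,
   missing tables are allocated and new entries are initialised to
   IO_MEM_UNASSIGNED, otherwise NULL is returned for unmapped pages */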
253 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
255 void **lp, **p;
256 PhysPageDesc *pd;
258 p = (void **)l1_phys_map;
259 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
261 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
262 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
263 #endif
264 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
265 p = *lp;
266 if (!p) {
267 /* allocate if not found */
268 if (!alloc)
269 return NULL;
270 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
271 memset(p, 0, sizeof(void *) * L1_SIZE);
272 *lp = p;
274 #endif
275 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
276 pd = *lp;
277 if (!pd) {
278 int i;
279 /* allocate if not found */
280 if (!alloc)
281 return NULL;
282 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
283 *lp = pd;
284 for (i = 0; i < L2_SIZE; i++)
285 pd[i].phys_offset = IO_MEM_UNASSIGNED;
287 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
290 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
292 return phys_page_find_alloc(index, 0);
295 #if !defined(CONFIG_USER_ONLY)
296 static void tlb_protect_code(ram_addr_t ram_addr);
297 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
298 target_ulong vaddr);
299 #endif
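/* register a new virtual CPU: initialise the code buffer, page maps and
   I/O memory on the first call, then append the CPU to the global list
   and assign it the next cpu_index */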
301 void cpu_exec_init(CPUState *env)
303 CPUState **penv;
304 int cpu_index;
306 if (!code_gen_ptr) {
307 code_gen_ptr = code_gen_buffer;
308 page_init();
309 io_mem_init();
311 env->next_cpu = NULL;
312 penv = &first_cpu;
313 cpu_index = 0;
314 while (*penv != NULL) {
315 penv = (CPUState **)&(*penv)->next_cpu;
316 cpu_index++;
318 env->cpu_index = cpu_index;
319 env->nb_watchpoints = 0;
320 *penv = env;
323 static inline void invalidate_page_bitmap(PageDesc *p)
325 if (p->code_bitmap) {
326 qemu_free(p->code_bitmap);
327 p->code_bitmap = NULL;
329 p->code_write_count = 0;
332 /* set to NULL all the 'first_tb' fields in all PageDescs */
333 static void page_flush_tb(void)
335 int i, j;
336 PageDesc *p;
338 for(i = 0; i < L1_SIZE; i++) {
339 p = l1_map[i];
340 if (p) {
341 for(j = 0; j < L2_SIZE; j++) {
342 p->first_tb = NULL;
343 invalidate_page_bitmap(p);
344 p++;
350 /* flush all the translation blocks */
351 /* XXX: tb_flush is currently not thread safe */
352 void tb_flush(CPUState *env1)
354 CPUState *env;
355 #if defined(DEBUG_FLUSH)
356 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
357 code_gen_ptr - code_gen_buffer,
358 nb_tbs,
359 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
360 #endif
361 nb_tbs = 0;
363 for(env = first_cpu; env != NULL; env = env->next_cpu) {
364 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
367 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
368 page_flush_tb();
370 code_gen_ptr = code_gen_buffer;
371 /* XXX: flush processor icache at this point if cache flush is
372 expensive */
373 tb_flush_count++;
376 #ifdef DEBUG_TB_CHECK
378 static void tb_invalidate_check(target_ulong address)
380 TranslationBlock *tb;
381 int i;
382 address &= TARGET_PAGE_MASK;
383 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
384 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
385 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
386 address >= tb->pc + tb->size)) {
387 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
388 address, (long)tb->pc, tb->size);
394 /* verify that all the pages have correct rights for code */
395 static void tb_page_check(void)
397 TranslationBlock *tb;
398 int i, flags1, flags2;
400 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
401 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
402 flags1 = page_get_flags(tb->pc);
403 flags2 = page_get_flags(tb->pc + tb->size - 1);
404 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
405 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
406 (long)tb->pc, tb->size, flags1, flags2);
412 void tb_jmp_check(TranslationBlock *tb)
414 TranslationBlock *tb1;
415 unsigned int n1;
417 /* suppress any remaining jumps to this TB */
418 tb1 = tb->jmp_first;
419 for(;;) {
420 n1 = (long)tb1 & 3;
421 tb1 = (TranslationBlock *)((long)tb1 & ~3);
422 if (n1 == 2)
423 break;
424 tb1 = tb1->jmp_next[n1];
426 /* check end of list */
427 if (tb1 != tb) {
428 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
432 #endif
434 /* invalidate one TB */
435 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
436 int next_offset)
438 TranslationBlock *tb1;
439 for(;;) {
440 tb1 = *ptb;
441 if (tb1 == tb) {
442 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
443 break;
445 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
449 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
451 TranslationBlock *tb1;
452 unsigned int n1;
454 for(;;) {
455 tb1 = *ptb;
456 n1 = (long)tb1 & 3;
457 tb1 = (TranslationBlock *)((long)tb1 & ~3);
458 if (tb1 == tb) {
459 *ptb = tb1->page_next[n1];
460 break;
462 ptb = &tb1->page_next[n1];
466 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
468 TranslationBlock *tb1, **ptb;
469 unsigned int n1;
471 ptb = &tb->jmp_next[n];
472 tb1 = *ptb;
473 if (tb1) {
474 /* find tb(n) in circular list */
475 for(;;) {
476 tb1 = *ptb;
477 n1 = (long)tb1 & 3;
478 tb1 = (TranslationBlock *)((long)tb1 & ~3);
479 if (n1 == n && tb1 == tb)
480 break;
481 if (n1 == 2) {
482 ptb = &tb1->jmp_first;
483 } else {
484 ptb = &tb1->jmp_next[n1];
487 /* now we can suppress tb(n) from the list */
488 *ptb = tb->jmp_next[n];
490 tb->jmp_next[n] = NULL;
494 /* reset the jump entry 'n' of a TB so that it is not chained to
495 another TB */
496 static inline void tb_reset_jump(TranslationBlock *tb, int n)
498 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
501 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
503 CPUState *env;
504 PageDesc *p;
505 unsigned int h, n1;
506 target_ulong phys_pc;
507 TranslationBlock *tb1, *tb2;
509 /* remove the TB from the hash list */
510 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
511 h = tb_phys_hash_func(phys_pc);
512 tb_remove(&tb_phys_hash[h], tb,
513 offsetof(TranslationBlock, phys_hash_next));
515 /* remove the TB from the page list */
516 if (tb->page_addr[0] != page_addr) {
517 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
518 tb_page_remove(&p->first_tb, tb);
519 invalidate_page_bitmap(p);
521 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
522 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
523 tb_page_remove(&p->first_tb, tb);
524 invalidate_page_bitmap(p);
527 tb_invalidated_flag = 1;
529 /* remove the TB from the hash list */
530 h = tb_jmp_cache_hash_func(tb->pc);
531 for(env = first_cpu; env != NULL; env = env->next_cpu) {
532 if (env->tb_jmp_cache[h] == tb)
533 env->tb_jmp_cache[h] = NULL;
536 /* suppress this TB from the two jump lists */
537 tb_jmp_remove(tb, 0);
538 tb_jmp_remove(tb, 1);
540 /* suppress any remaining jumps to this TB */
541 tb1 = tb->jmp_first;
542 for(;;) {
543 n1 = (long)tb1 & 3;
544 if (n1 == 2)
545 break;
546 tb1 = (TranslationBlock *)((long)tb1 & ~3);
547 tb2 = tb1->jmp_next[n1];
548 tb_reset_jump(tb1, n1);
549 tb1->jmp_next[n1] = NULL;
550 tb1 = tb2;
552 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
554 tb_phys_invalidate_count++;
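/* set 'len' consecutive bits starting at bit 'start' in the bitmap 'tab' */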
557 static inline void set_bits(uint8_t *tab, int start, int len)
559 int end, mask, end1;
561 end = start + len;
562 tab += start >> 3;
563 mask = 0xff << (start & 7);
564 if ((start & ~7) == (end & ~7)) {
565 if (start < end) {
566 mask &= ~(0xff << (end & 7));
567 *tab |= mask;
569 } else {
570 *tab++ |= mask;
571 start = (start + 8) & ~7;
572 end1 = end & ~7;
573 while (start < end1) {
574 *tab++ = 0xff;
575 start += 8;
577 if (start < end) {
578 mask = ~(0xff << (end & 7));
579 *tab |= mask;
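/* build the code bitmap of a page: one bit per byte covered by a
   translated block, so that unrelated writes to the page can be
   filtered out quickly */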
584 static void build_page_bitmap(PageDesc *p)
586 int n, tb_start, tb_end;
587 TranslationBlock *tb;
589 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
590 if (!p->code_bitmap)
591 return;
592 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
594 tb = p->first_tb;
595 while (tb != NULL) {
596 n = (long)tb & 3;
597 tb = (TranslationBlock *)((long)tb & ~3);
598 /* NOTE: this is subtle as a TB may span two physical pages */
599 if (n == 0) {
600 /* NOTE: tb_end may be after the end of the page, but
601 it is not a problem */
602 tb_start = tb->pc & ~TARGET_PAGE_MASK;
603 tb_end = tb_start + tb->size;
604 if (tb_end > TARGET_PAGE_SIZE)
605 tb_end = TARGET_PAGE_SIZE;
606 } else {
607 tb_start = 0;
608 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
610 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
611 tb = tb->page_next[n];
615 #ifdef TARGET_HAS_PRECISE_SMC
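/* translate the code starting at 'pc', allocate a new TB for it
   (flushing the TB cache if necessary) and link it into the physical
   page tables */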
617 static void tb_gen_code(CPUState *env,
618 target_ulong pc, target_ulong cs_base, int flags,
619 int cflags)
621 TranslationBlock *tb;
622 uint8_t *tc_ptr;
623 target_ulong phys_pc, phys_page2, virt_page2;
624 int code_gen_size;
626 phys_pc = get_phys_addr_code(env, pc);
627 tb = tb_alloc(pc);
628 if (!tb) {
629 /* flush must be done */
630 tb_flush(env);
631 /* cannot fail at this point */
632 tb = tb_alloc(pc);
634 tc_ptr = code_gen_ptr;
635 tb->tc_ptr = tc_ptr;
636 tb->cs_base = cs_base;
637 tb->flags = flags;
638 tb->cflags = cflags;
639 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
640 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
642 /* check next page if needed */
643 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
644 phys_page2 = -1;
645 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
646 phys_page2 = get_phys_addr_code(env, virt_page2);
648 tb_link_phys(tb, phys_pc, phys_page2);
650 #endif
652 /* invalidate all TBs which intersect with the target physical page
653 starting in range [start;end[. NOTE: start and end must refer to
654 the same physical page. 'is_cpu_write_access' should be true if called
655 from a real cpu write access: the virtual CPU will exit the current
656 TB if code is modified inside this TB. */
657 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
658 int is_cpu_write_access)
660 int n, current_tb_modified, current_tb_not_found, current_flags;
661 CPUState *env = cpu_single_env;
662 PageDesc *p;
663 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
664 target_ulong tb_start, tb_end;
665 target_ulong current_pc, current_cs_base;
667 p = page_find(start >> TARGET_PAGE_BITS);
668 if (!p)
669 return;
670 if (!p->code_bitmap &&
671 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
672 is_cpu_write_access) {
673 /* build code bitmap */
674 build_page_bitmap(p);
677 /* we remove all the TBs in the range [start, end[ */
678 /* XXX: see if in some cases it could be faster to invalidate all the code */
679 current_tb_not_found = is_cpu_write_access;
680 current_tb_modified = 0;
681 current_tb = NULL; /* avoid warning */
682 current_pc = 0; /* avoid warning */
683 current_cs_base = 0; /* avoid warning */
684 current_flags = 0; /* avoid warning */
685 tb = p->first_tb;
686 while (tb != NULL) {
687 n = (long)tb & 3;
688 tb = (TranslationBlock *)((long)tb & ~3);
689 tb_next = tb->page_next[n];
690 /* NOTE: this is subtle as a TB may span two physical pages */
691 if (n == 0) {
692 /* NOTE: tb_end may be after the end of the page, but
693 it is not a problem */
694 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
695 tb_end = tb_start + tb->size;
696 } else {
697 tb_start = tb->page_addr[1];
698 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
700 if (!(tb_end <= start || tb_start >= end)) {
701 #ifdef TARGET_HAS_PRECISE_SMC
702 if (current_tb_not_found) {
703 current_tb_not_found = 0;
704 current_tb = NULL;
705 if (env->mem_write_pc) {
706 /* now we have a real cpu fault */
707 current_tb = tb_find_pc(env->mem_write_pc);
710 if (current_tb == tb &&
711 !(current_tb->cflags & CF_SINGLE_INSN)) {
712 /* If we are modifying the current TB, we must stop
713 its execution. We could be more precise by checking
714 that the modification is after the current PC, but it
715 would require a specialized function to partially
716 restore the CPU state */
718 current_tb_modified = 1;
719 cpu_restore_state(current_tb, env,
720 env->mem_write_pc, NULL);
721 #if defined(TARGET_I386)
722 current_flags = env->hflags;
723 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
724 current_cs_base = (target_ulong)env->segs[R_CS].base;
725 current_pc = current_cs_base + env->eip;
726 #else
727 #error unsupported CPU
728 #endif
730 #endif /* TARGET_HAS_PRECISE_SMC */
731 /* we need to do that to handle the case where a signal
732 occurs while doing tb_phys_invalidate() */
733 saved_tb = NULL;
734 if (env) {
735 saved_tb = env->current_tb;
736 env->current_tb = NULL;
738 tb_phys_invalidate(tb, -1);
739 if (env) {
740 env->current_tb = saved_tb;
741 if (env->interrupt_request && env->current_tb)
742 cpu_interrupt(env, env->interrupt_request);
745 tb = tb_next;
747 #if !defined(CONFIG_USER_ONLY)
748 /* if no code remaining, no need to continue to use slow writes */
749 if (!p->first_tb) {
750 invalidate_page_bitmap(p);
751 if (is_cpu_write_access) {
752 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
755 #endif
756 #ifdef TARGET_HAS_PRECISE_SMC
757 if (current_tb_modified) {
758 /* we generate a block containing just the instruction
759 modifying the memory. It will ensure that it cannot modify
760 itself */
761 env->current_tb = NULL;
762 tb_gen_code(env, current_pc, current_cs_base, current_flags,
763 CF_SINGLE_INSN);
764 cpu_resume_from_signal(env, NULL);
766 #endif
769 /* len must be <= 8 and start must be a multiple of len */
770 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
772 PageDesc *p;
773 int offset, b;
774 #if 0
775 if (1) {
776 if (loglevel) {
777 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
778 cpu_single_env->mem_write_vaddr, len,
779 cpu_single_env->eip,
780 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
783 #endif
784 p = page_find(start >> TARGET_PAGE_BITS);
785 if (!p)
786 return;
787 if (p->code_bitmap) {
788 offset = start & ~TARGET_PAGE_MASK;
789 b = p->code_bitmap[offset >> 3] >> (offset & 7);
790 if (b & ((1 << len) - 1))
791 goto do_invalidate;
792 } else {
793 do_invalidate:
794 tb_invalidate_phys_page_range(start, start + len, 1);
798 #if !defined(CONFIG_SOFTMMU)
799 static void tb_invalidate_phys_page(target_ulong addr,
800 unsigned long pc, void *puc)
802 int n, current_flags, current_tb_modified;
803 target_ulong current_pc, current_cs_base;
804 PageDesc *p;
805 TranslationBlock *tb, *current_tb;
806 #ifdef TARGET_HAS_PRECISE_SMC
807 CPUState *env = cpu_single_env;
808 #endif
810 addr &= TARGET_PAGE_MASK;
811 p = page_find(addr >> TARGET_PAGE_BITS);
812 if (!p)
813 return;
814 tb = p->first_tb;
815 current_tb_modified = 0;
816 current_tb = NULL;
817 current_pc = 0; /* avoid warning */
818 current_cs_base = 0; /* avoid warning */
819 current_flags = 0; /* avoid warning */
820 #ifdef TARGET_HAS_PRECISE_SMC
821 if (tb && pc != 0) {
822 current_tb = tb_find_pc(pc);
824 #endif
825 while (tb != NULL) {
826 n = (long)tb & 3;
827 tb = (TranslationBlock *)((long)tb & ~3);
828 #ifdef TARGET_HAS_PRECISE_SMC
829 if (current_tb == tb &&
830 !(current_tb->cflags & CF_SINGLE_INSN)) {
831 /* If we are modifying the current TB, we must stop
832 its execution. We could be more precise by checking
833 that the modification is after the current PC, but it
834 would require a specialized function to partially
835 restore the CPU state */
837 current_tb_modified = 1;
838 cpu_restore_state(current_tb, env, pc, puc);
839 #if defined(TARGET_I386)
840 current_flags = env->hflags;
841 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
842 current_cs_base = (target_ulong)env->segs[R_CS].base;
843 current_pc = current_cs_base + env->eip;
844 #else
845 #error unsupported CPU
846 #endif
848 #endif /* TARGET_HAS_PRECISE_SMC */
849 tb_phys_invalidate(tb, addr);
850 tb = tb->page_next[n];
852 p->first_tb = NULL;
853 #ifdef TARGET_HAS_PRECISE_SMC
854 if (current_tb_modified) {
855 /* we generate a block containing just the instruction
856 modifying the memory. It will ensure that it cannot modify
857 itself */
858 env->current_tb = NULL;
859 tb_gen_code(env, current_pc, current_cs_base, current_flags,
860 CF_SINGLE_INSN);
861 cpu_resume_from_signal(env, puc);
863 #endif
865 #endif
867 /* add the tb in the target page and protect it if necessary */
868 static inline void tb_alloc_page(TranslationBlock *tb,
869 unsigned int n, target_ulong page_addr)
871 PageDesc *p;
872 TranslationBlock *last_first_tb;
874 tb->page_addr[n] = page_addr;
875 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
876 tb->page_next[n] = p->first_tb;
877 last_first_tb = p->first_tb;
878 p->first_tb = (TranslationBlock *)((long)tb | n);
879 invalidate_page_bitmap(p);
881 #if defined(TARGET_HAS_SMC) || 1
883 #if defined(CONFIG_USER_ONLY)
884 if (p->flags & PAGE_WRITE) {
885 target_ulong addr;
886 PageDesc *p2;
887 int prot;
889 /* force the host page as non writable (writes will have a
890 page fault + mprotect overhead) */
891 page_addr &= qemu_host_page_mask;
892 prot = 0;
893 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
894 addr += TARGET_PAGE_SIZE) {
896 p2 = page_find (addr >> TARGET_PAGE_BITS);
897 if (!p2)
898 continue;
899 prot |= p2->flags;
900 p2->flags &= ~PAGE_WRITE;
901 page_get_flags(addr);
903 mprotect(g2h(page_addr), qemu_host_page_size,
904 (prot & PAGE_BITS) & ~PAGE_WRITE);
905 #ifdef DEBUG_TB_INVALIDATE
906 printf("protecting code page: 0x%08lx\n",
907 page_addr);
908 #endif
910 #else
911 /* if some code is already present, then the pages are already
912 protected. So we handle the case where only the first TB is
913 allocated in a physical page */
914 if (!last_first_tb) {
915 tlb_protect_code(page_addr);
917 #endif
919 #endif /* TARGET_HAS_SMC */
922 /* Allocate a new translation block. Flush the translation buffer if
923 too many translation blocks or too much generated code. */
924 TranslationBlock *tb_alloc(target_ulong pc)
926 TranslationBlock *tb;
928 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
929 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
930 return NULL;
931 tb = &tbs[nb_tbs++];
932 tb->pc = pc;
933 tb->cflags = 0;
934 return tb;
937 /* add a new TB and link it to the physical page tables. phys_page2 is
938 (-1) to indicate that only one page contains the TB. */
939 void tb_link_phys(TranslationBlock *tb,
940 target_ulong phys_pc, target_ulong phys_page2)
942 unsigned int h;
943 TranslationBlock **ptb;
945 /* add in the physical hash table */
946 h = tb_phys_hash_func(phys_pc);
947 ptb = &tb_phys_hash[h];
948 tb->phys_hash_next = *ptb;
949 *ptb = tb;
951 /* add in the page list */
952 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
953 if (phys_page2 != -1)
954 tb_alloc_page(tb, 1, phys_page2);
955 else
956 tb->page_addr[1] = -1;
958 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
959 tb->jmp_next[0] = NULL;
960 tb->jmp_next[1] = NULL;
961 #ifdef USE_CODE_COPY
962 tb->cflags &= ~CF_FP_USED;
963 if (tb->cflags & CF_TB_FP_USED)
964 tb->cflags |= CF_FP_USED;
965 #endif
967 /* init original jump addresses */
968 if (tb->tb_next_offset[0] != 0xffff)
969 tb_reset_jump(tb, 0);
970 if (tb->tb_next_offset[1] != 0xffff)
971 tb_reset_jump(tb, 1);
973 #ifdef DEBUG_TB_CHECK
974 tb_page_check();
975 #endif
978 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
979 tb[1].tc_ptr. Return NULL if not found */
980 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
982 int m_min, m_max, m;
983 unsigned long v;
984 TranslationBlock *tb;
986 if (nb_tbs <= 0)
987 return NULL;
988 if (tc_ptr < (unsigned long)code_gen_buffer ||
989 tc_ptr >= (unsigned long)code_gen_ptr)
990 return NULL;
991 /* binary search (cf Knuth) */
992 m_min = 0;
993 m_max = nb_tbs - 1;
994 while (m_min <= m_max) {
995 m = (m_min + m_max) >> 1;
996 tb = &tbs[m];
997 v = (unsigned long)tb->tc_ptr;
998 if (v == tc_ptr)
999 return tb;
1000 else if (tc_ptr < v) {
1001 m_max = m - 1;
1002 } else {
1003 m_min = m + 1;
1006 return &tbs[m_max];
1009 static void tb_reset_jump_recursive(TranslationBlock *tb);
1011 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1013 TranslationBlock *tb1, *tb_next, **ptb;
1014 unsigned int n1;
1016 tb1 = tb->jmp_next[n];
1017 if (tb1 != NULL) {
1018 /* find head of list */
1019 for(;;) {
1020 n1 = (long)tb1 & 3;
1021 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1022 if (n1 == 2)
1023 break;
1024 tb1 = tb1->jmp_next[n1];
1026 /* we are now sure that tb jumps to tb1 */
1027 tb_next = tb1;
1029 /* remove tb from the jmp_first list */
1030 ptb = &tb_next->jmp_first;
1031 for(;;) {
1032 tb1 = *ptb;
1033 n1 = (long)tb1 & 3;
1034 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1035 if (n1 == n && tb1 == tb)
1036 break;
1037 ptb = &tb1->jmp_next[n1];
1039 *ptb = tb->jmp_next[n];
1040 tb->jmp_next[n] = NULL;
1042 /* suppress the jump to next tb in generated code */
1043 tb_reset_jump(tb, n);
1045 /* suppress jumps in the tb on which we could have jumped */
1046 tb_reset_jump_recursive(tb_next);
1050 static void tb_reset_jump_recursive(TranslationBlock *tb)
1052 tb_reset_jump_recursive2(tb, 0);
1053 tb_reset_jump_recursive2(tb, 1);
1056 #if defined(TARGET_HAS_ICE)
1057 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1059 target_phys_addr_t addr;
1060 target_ulong pd;
1061 ram_addr_t ram_addr;
1062 PhysPageDesc *p;
1064 addr = cpu_get_phys_page_debug(env, pc);
1065 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1066 if (!p) {
1067 pd = IO_MEM_UNASSIGNED;
1068 } else {
1069 pd = p->phys_offset;
1071 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1072 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1074 #endif
1076 /* Add a watchpoint. */
1077 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1079 int i;
1081 for (i = 0; i < env->nb_watchpoints; i++) {
1082 if (addr == env->watchpoint[i].vaddr)
1083 return 0;
1085 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1086 return -1;
1088 i = env->nb_watchpoints++;
1089 env->watchpoint[i].vaddr = addr;
1090 tlb_flush_page(env, addr);
1091 /* FIXME: This flush is needed because of the hack to make memory ops
1092 terminate the TB. It can be removed once the proper IO trap and
1093 re-execute bits are in. */
1094 tb_flush(env);
1095 return i;
1098 /* Remove a watchpoint. */
1099 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1101 int i;
1103 for (i = 0; i < env->nb_watchpoints; i++) {
1104 if (addr == env->watchpoint[i].vaddr) {
1105 env->nb_watchpoints--;
1106 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1107 tlb_flush_page(env, addr);
1108 return 0;
1111 return -1;
1114 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1115 breakpoint is reached */
1116 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1118 #if defined(TARGET_HAS_ICE)
1119 int i;
1121 for(i = 0; i < env->nb_breakpoints; i++) {
1122 if (env->breakpoints[i] == pc)
1123 return 0;
1126 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1127 return -1;
1128 env->breakpoints[env->nb_breakpoints++] = pc;
1130 #ifdef USE_KVM
1131 if (kvm_allowed)
1132 kvm_update_debugger(env);
1133 #endif
1135 breakpoint_invalidate(env, pc);
1136 return 0;
1137 #else
1138 return -1;
1139 #endif
1142 /* remove a breakpoint */
1143 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1145 #if defined(TARGET_HAS_ICE)
1146 int i;
1147 for(i = 0; i < env->nb_breakpoints; i++) {
1148 if (env->breakpoints[i] == pc)
1149 goto found;
1151 return -1;
1152 found:
1153 env->nb_breakpoints--;
1154 if (i < env->nb_breakpoints)
1155 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1157 #ifdef USE_KVM
1158 if (kvm_allowed)
1159 kvm_update_debugger(env);
1160 #endif
1162 breakpoint_invalidate(env, pc);
1163 return 0;
1164 #else
1165 return -1;
1166 #endif
1169 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1170 CPU loop after each instruction */
1171 void cpu_single_step(CPUState *env, int enabled)
1173 #if defined(TARGET_HAS_ICE)
1174 if (env->singlestep_enabled != enabled) {
1175 env->singlestep_enabled = enabled;
1176 /* must flush all the translated code to avoid inconsistencies */
1177 /* XXX: only flush what is necessary */
1178 tb_flush(env);
1180 #ifdef USE_KVM
1181 if (kvm_allowed)
1182 kvm_update_debugger(env);
1183 #endif
1184 #endif
1187 /* enable or disable low levels log */
1188 void cpu_set_log(int log_flags)
1190 loglevel = log_flags;
1191 if (loglevel && !logfile) {
1192 logfile = fopen(logfilename, log_append ? "a" : "w");
1193 if (!logfile) {
1194 perror(logfilename);
1195 _exit(1);
1197 #if !defined(CONFIG_SOFTMMU)
1198 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1200 static uint8_t logfile_buf[4096];
1201 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1203 #else
1204 setvbuf(logfile, NULL, _IOLBF, 0);
1205 #endif
1206 log_append = 1;
1208 if (!loglevel && logfile) {
1209 fclose(logfile);
1210 logfile = NULL;
1214 void cpu_set_log_filename(const char *filename)
1216 logfilename = strdup(filename);
1217 if (logfile) {
1218 fclose(logfile);
1219 logfile = NULL;
1221 cpu_set_log(loglevel);
1224 /* mask must never be zero, except for A20 change call */
1225 void cpu_interrupt(CPUState *env, int mask)
1227 TranslationBlock *tb;
1228 static int interrupt_lock;
1230 env->interrupt_request |= mask;
1231 #ifdef USE_KVM
1232 if (kvm_allowed && !kvm_irqchip_in_kernel(kvm_context))
1233 kvm_update_interrupt_request(env);
1234 #endif
1235 /* if the cpu is currently executing code, we must unlink it and
1236 all the potentially executing TB */
1237 tb = env->current_tb;
1238 if (tb && !testandset(&interrupt_lock)) {
1239 env->current_tb = NULL;
1240 tb_reset_jump_recursive(tb);
1241 interrupt_lock = 0;
1245 void cpu_reset_interrupt(CPUState *env, int mask)
1247 env->interrupt_request &= ~mask;
1250 CPULogItem cpu_log_items[] = {
1251 { CPU_LOG_TB_OUT_ASM, "out_asm",
1252 "show generated host assembly code for each compiled TB" },
1253 { CPU_LOG_TB_IN_ASM, "in_asm",
1254 "show target assembly code for each compiled TB" },
1255 { CPU_LOG_TB_OP, "op",
1256 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1257 #ifdef TARGET_I386
1258 { CPU_LOG_TB_OP_OPT, "op_opt",
1259 "show micro ops after optimization for each compiled TB" },
1260 #endif
1261 { CPU_LOG_INT, "int",
1262 "show interrupts/exceptions in short format" },
1263 { CPU_LOG_EXEC, "exec",
1264 "show trace before each executed TB (lots of logs)" },
1265 { CPU_LOG_TB_CPU, "cpu",
1266 "show CPU state before block translation" },
1267 #ifdef TARGET_I386
1268 { CPU_LOG_PCALL, "pcall",
1269 "show protected mode far calls/returns/exceptions" },
1270 #endif
1271 #ifdef DEBUG_IOPORT
1272 { CPU_LOG_IOPORT, "ioport",
1273 "show all i/o ports accesses" },
1274 #endif
1275 { 0, NULL, NULL },
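/* return true if the first 'n' characters of 's1' exactly match the
   string 's2' */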
1278 static int cmp1(const char *s1, int n, const char *s2)
1280 if (strlen(s2) != n)
1281 return 0;
1282 return memcmp(s1, s2, n) == 0;
1285 /* takes a comma separated list of log masks. Return 0 if error. */
1286 int cpu_str_to_log_mask(const char *str)
1288 CPULogItem *item;
1289 int mask;
1290 const char *p, *p1;
1292 p = str;
1293 mask = 0;
1294 for(;;) {
1295 p1 = strchr(p, ',');
1296 if (!p1)
1297 p1 = p + strlen(p);
1298 if(cmp1(p,p1-p,"all")) {
1299 for(item = cpu_log_items; item->mask != 0; item++) {
1300 mask |= item->mask;
1302 } else {
1303 for(item = cpu_log_items; item->mask != 0; item++) {
1304 if (cmp1(p, p1 - p, item->name))
1305 goto found;
1307 return 0;
1309 found:
1310 mask |= item->mask;
1311 if (*p1 != ',')
1312 break;
1313 p = p1 + 1;
1315 return mask;
1318 void cpu_abort(CPUState *env, const char *fmt, ...)
1320 va_list ap;
1322 va_start(ap, fmt);
1323 fprintf(stderr, "qemu: fatal: ");
1324 vfprintf(stderr, fmt, ap);
1325 fprintf(stderr, "\n");
1326 #ifdef TARGET_I386
1327 if(env->intercept & INTERCEPT_SVM_MASK) {
1328 /* the virtual machine should most probably not be shut down
1329 but rather caught by the VMM */
1330 vmexit(SVM_EXIT_SHUTDOWN, 0);
1332 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1333 #else
1334 cpu_dump_state(env, stderr, fprintf, 0);
1335 #endif
1336 if (logfile) {
1337 fprintf(logfile, "qemu: fatal: ");
1338 vfprintf(logfile, fmt, ap);
1339 fprintf(logfile, "\n");
1340 #ifdef TARGET_I386
1341 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1342 #else
1343 cpu_dump_state(env, logfile, fprintf, 0);
1344 #endif
1345 fflush(logfile);
1346 fclose(logfile);
1348 va_end(ap);
1349 abort();
1352 CPUState *cpu_copy(CPUState *env)
1354 CPUState *new_env = cpu_init();
1355 /* preserve chaining and index */
1356 CPUState *next_cpu = new_env->next_cpu;
1357 int cpu_index = new_env->cpu_index;
1358 memcpy(new_env, env, sizeof(CPUState));
1359 new_env->next_cpu = next_cpu;
1360 new_env->cpu_index = cpu_index;
1361 return new_env;
1364 #if !defined(CONFIG_USER_ONLY)
1366 /* NOTE: if flush_global is true, also flush global entries (not
1367 implemented yet) */
1368 void tlb_flush(CPUState *env, int flush_global)
1370 int i;
1372 #if defined(DEBUG_TLB)
1373 printf("tlb_flush:\n");
1374 #endif
1375 /* must reset current TB so that interrupts cannot modify the
1376 links while we are modifying them */
1377 env->current_tb = NULL;
1379 for(i = 0; i < CPU_TLB_SIZE; i++) {
1380 env->tlb_table[0][i].addr_read = -1;
1381 env->tlb_table[0][i].addr_write = -1;
1382 env->tlb_table[0][i].addr_code = -1;
1383 env->tlb_table[1][i].addr_read = -1;
1384 env->tlb_table[1][i].addr_write = -1;
1385 env->tlb_table[1][i].addr_code = -1;
1386 #if (NB_MMU_MODES >= 3)
1387 env->tlb_table[2][i].addr_read = -1;
1388 env->tlb_table[2][i].addr_write = -1;
1389 env->tlb_table[2][i].addr_code = -1;
1390 #if (NB_MMU_MODES == 4)
1391 env->tlb_table[3][i].addr_read = -1;
1392 env->tlb_table[3][i].addr_write = -1;
1393 env->tlb_table[3][i].addr_code = -1;
1394 #endif
1395 #endif
1398 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1400 #if !defined(CONFIG_SOFTMMU)
1401 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1402 #endif
1403 #ifdef USE_KQEMU
1404 if (env->kqemu_enabled) {
1405 kqemu_flush(env, flush_global);
1407 #endif
1408 tlb_flush_count++;
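/* invalidate a single TLB entry if its read, write or code address
   matches 'addr' */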
1411 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1413 if (addr == (tlb_entry->addr_read &
1414 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1415 addr == (tlb_entry->addr_write &
1416 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1417 addr == (tlb_entry->addr_code &
1418 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1419 tlb_entry->addr_read = -1;
1420 tlb_entry->addr_write = -1;
1421 tlb_entry->addr_code = -1;
1425 void tlb_flush_page(CPUState *env, target_ulong addr)
1427 int i;
1428 TranslationBlock *tb;
1430 #if defined(DEBUG_TLB)
1431 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1432 #endif
1433 /* must reset current TB so that interrupts cannot modify the
1434 links while we are modifying them */
1435 env->current_tb = NULL;
1437 addr &= TARGET_PAGE_MASK;
1438 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1439 tlb_flush_entry(&env->tlb_table[0][i], addr);
1440 tlb_flush_entry(&env->tlb_table[1][i], addr);
1441 #if (NB_MMU_MODES >= 3)
1442 tlb_flush_entry(&env->tlb_table[2][i], addr);
1443 #if (NB_MMU_MODES == 4)
1444 tlb_flush_entry(&env->tlb_table[3][i], addr);
1445 #endif
1446 #endif
1448 /* Discard jump cache entries for any tb which might potentially
1449 overlap the flushed page. */
1450 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1451 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1453 i = tb_jmp_cache_hash_page(addr);
1454 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1456 #if !defined(CONFIG_SOFTMMU)
1457 if (addr < MMAP_AREA_END)
1458 munmap((void *)addr, TARGET_PAGE_SIZE);
1459 #endif
1460 #ifdef USE_KQEMU
1461 if (env->kqemu_enabled) {
1462 kqemu_flush_page(env, addr);
1464 #endif
1467 /* update the TLBs so that writes to code in the virtual page 'addr'
1468 can be detected */
1469 static void tlb_protect_code(ram_addr_t ram_addr)
1471 cpu_physical_memory_reset_dirty(ram_addr,
1472 ram_addr + TARGET_PAGE_SIZE,
1473 CODE_DIRTY_FLAG);
1476 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1477 tested for self modifying code */
1478 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1479 target_ulong vaddr)
1481 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
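/* redirect the write address of a TLB entry through the NOTDIRTY
   handler if it points into RAM in [start, start + length) */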
1484 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1485 unsigned long start, unsigned long length)
1487 unsigned long addr;
1488 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1489 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1490 if ((addr - start) < length) {
1491 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
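/* clear the given dirty flags for physical memory in [start, end) and
   patch the TLBs of all CPUs so that the next write to those pages sets
   the dirty bits again */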
1496 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1497 int dirty_flags)
1499 CPUState *env;
1500 unsigned long length, start1;
1501 int i, mask, len;
1502 uint8_t *p;
1504 start &= TARGET_PAGE_MASK;
1505 end = TARGET_PAGE_ALIGN(end);
1507 length = end - start;
1508 if (length == 0)
1509 return;
1510 len = length >> TARGET_PAGE_BITS;
1511 #ifdef USE_KQEMU
1512 /* XXX: should not depend on cpu context */
1513 env = first_cpu;
1514 if (env->kqemu_enabled) {
1515 ram_addr_t addr;
1516 addr = start;
1517 for(i = 0; i < len; i++) {
1518 kqemu_set_notdirty(env, addr);
1519 addr += TARGET_PAGE_SIZE;
1522 #endif
1523 mask = ~dirty_flags;
1524 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1525 for(i = 0; i < len; i++)
1526 p[i] &= mask;
1528 /* we modify the TLB cache so that the dirty bit will be set again
1529 when accessing the range */
1530 start1 = start + (unsigned long)phys_ram_base;
1531 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1532 for(i = 0; i < CPU_TLB_SIZE; i++)
1533 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1534 for(i = 0; i < CPU_TLB_SIZE; i++)
1535 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1536 #if (NB_MMU_MODES >= 3)
1537 for(i = 0; i < CPU_TLB_SIZE; i++)
1538 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1539 #if (NB_MMU_MODES == 4)
1540 for(i = 0; i < CPU_TLB_SIZE; i++)
1541 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1542 #endif
1543 #endif
1546 #if !defined(CONFIG_SOFTMMU)
1547 /* XXX: this is expensive */
1549 VirtPageDesc *p;
1550 int j;
1551 target_ulong addr;
1553 for(i = 0; i < L1_SIZE; i++) {
1554 p = l1_virt_map[i];
1555 if (p) {
1556 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1557 for(j = 0; j < L2_SIZE; j++) {
1558 if (p->valid_tag == virt_valid_tag &&
1559 p->phys_addr >= start && p->phys_addr < end &&
1560 (p->prot & PROT_WRITE)) {
1561 if (addr < MMAP_AREA_END) {
1562 mprotect((void *)addr, TARGET_PAGE_SIZE,
1563 p->prot & ~PROT_WRITE);
1566 addr += TARGET_PAGE_SIZE;
1567 p++;
1572 #endif
1575 int cpu_physical_memory_set_dirty_tracking(int enable)
1577 int r=0;
1579 #ifdef USE_KVM
1580 r = kvm_physical_memory_set_dirty_tracking(enable);
1581 #endif
1582 in_migration = enable;
1583 return r;
1586 int cpu_physical_memory_get_dirty_tracking(void)
1588 return in_migration;
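/* if the RAM page behind a write TLB entry is no longer dirty, route
   writes through the NOTDIRTY handler again */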
1591 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1593 ram_addr_t ram_addr;
1595 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1596 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1597 tlb_entry->addend - (unsigned long)phys_ram_base;
1598 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1599 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1604 /* update the TLB according to the current state of the dirty bits */
1605 void cpu_tlb_update_dirty(CPUState *env)
1607 int i;
1608 for(i = 0; i < CPU_TLB_SIZE; i++)
1609 tlb_update_dirty(&env->tlb_table[0][i]);
1610 for(i = 0; i < CPU_TLB_SIZE; i++)
1611 tlb_update_dirty(&env->tlb_table[1][i]);
1612 #if (NB_MMU_MODES >= 3)
1613 for(i = 0; i < CPU_TLB_SIZE; i++)
1614 tlb_update_dirty(&env->tlb_table[2][i]);
1615 #if (NB_MMU_MODES == 4)
1616 for(i = 0; i < CPU_TLB_SIZE; i++)
1617 tlb_update_dirty(&env->tlb_table[3][i]);
1618 #endif
1619 #endif
1622 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1623 unsigned long start)
1625 unsigned long addr;
1626 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1627 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1628 if (addr == start) {
1629 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1634 /* update the TLB corresponding to virtual page vaddr and phys addr
1635 addr so that it is no longer dirty */
1636 static inline void tlb_set_dirty(CPUState *env,
1637 unsigned long addr, target_ulong vaddr)
1639 int i;
1641 addr &= TARGET_PAGE_MASK;
1642 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1643 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1644 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1645 #if (NB_MMU_MODES >= 3)
1646 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1647 #if (NB_MMU_MODES == 4)
1648 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1649 #endif
1650 #endif
1653 /* add a new TLB entry. At most one entry for a given virtual address
1654 is permitted. Return 0 if OK or 2 if the page could not be mapped
1655 (can only happen in non SOFTMMU mode for I/O pages or pages
1656 conflicting with the host address space). */
1657 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1658 target_phys_addr_t paddr, int prot,
1659 int is_user, int is_softmmu)
1661 PhysPageDesc *p;
1662 unsigned long pd;
1663 unsigned int index;
1664 target_ulong address;
1665 target_phys_addr_t addend;
1666 int ret;
1667 CPUTLBEntry *te;
1668 int i;
1670 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1671 if (!p) {
1672 pd = IO_MEM_UNASSIGNED;
1673 } else {
1674 pd = p->phys_offset;
1676 #if defined(DEBUG_TLB)
1677 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1678 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1679 #endif
1681 ret = 0;
1682 #if !defined(CONFIG_SOFTMMU)
1683 if (is_softmmu)
1684 #endif
1686 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1687 /* IO memory case */
1688 address = vaddr | pd;
1689 addend = paddr;
1690 } else {
1691 /* standard memory */
1692 address = vaddr;
1693 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1696 /* Make accesses to pages with watchpoints go via the
1697 watchpoint trap routines. */
1698 for (i = 0; i < env->nb_watchpoints; i++) {
1699 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1700 if (address & ~TARGET_PAGE_MASK) {
1701 env->watchpoint[i].addend = 0;
1702 address = vaddr | io_mem_watch;
1703 } else {
1704 env->watchpoint[i].addend = pd - paddr +
1705 (unsigned long) phys_ram_base;
1706 /* TODO: Figure out how to make read watchpoints coexist
1707 with code. */
1708 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1713 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1714 addend -= vaddr;
1715 te = &env->tlb_table[is_user][index];
1716 te->addend = addend;
1717 if (prot & PAGE_READ) {
1718 te->addr_read = address;
1719 } else {
1720 te->addr_read = -1;
1722 if (prot & PAGE_EXEC) {
1723 te->addr_code = address;
1724 } else {
1725 te->addr_code = -1;
1727 if (prot & PAGE_WRITE) {
1728 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1729 (pd & IO_MEM_ROMD)) {
1730 /* write access calls the I/O callback */
1731 te->addr_write = vaddr |
1732 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1733 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1734 !cpu_physical_memory_is_dirty(pd)) {
1735 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1736 } else {
1737 te->addr_write = address;
1739 } else {
1740 te->addr_write = -1;
1743 #if !defined(CONFIG_SOFTMMU)
1744 else {
1745 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1746 /* IO access: no mapping is done as it will be handled by the
1747 soft MMU */
1748 if (!(env->hflags & HF_SOFTMMU_MASK))
1749 ret = 2;
1750 } else {
1751 void *map_addr;
1753 if (vaddr >= MMAP_AREA_END) {
1754 ret = 2;
1755 } else {
1756 if (prot & PROT_WRITE) {
1757 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1758 #if defined(TARGET_HAS_SMC) || 1
1759 first_tb ||
1760 #endif
1761 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1762 !cpu_physical_memory_is_dirty(pd))) {
1763 /* ROM: we do as if code was inside */
1764 /* if code is present, we only map as read only and save the
1765 original mapping */
1766 VirtPageDesc *vp;
1768 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1769 vp->phys_addr = pd;
1770 vp->prot = prot;
1771 vp->valid_tag = virt_valid_tag;
1772 prot &= ~PAGE_WRITE;
1775 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1776 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1777 if (map_addr == MAP_FAILED) {
1778 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1779 paddr, vaddr);
1784 #endif
1785 return ret;
1788 /* called from signal handler: invalidate the code and unprotect the
1789 page. Return TRUE if the fault was successfully handled. */
1790 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1792 #if !defined(CONFIG_SOFTMMU)
1793 VirtPageDesc *vp;
1795 #if defined(DEBUG_TLB)
1796 printf("page_unprotect: addr=0x%08x\n", addr);
1797 #endif
1798 addr &= TARGET_PAGE_MASK;
1800 /* if it is not mapped, no need to worry here */
1801 if (addr >= MMAP_AREA_END)
1802 return 0;
1803 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1804 if (!vp)
1805 return 0;
1806 /* NOTE: in this case, validate_tag is _not_ tested as it
1807 validates only the code TLB */
1808 if (vp->valid_tag != virt_valid_tag)
1809 return 0;
1810 if (!(vp->prot & PAGE_WRITE))
1811 return 0;
1812 #if defined(DEBUG_TLB)
1813 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1814 addr, vp->phys_addr, vp->prot);
1815 #endif
1816 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1817 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1818 (unsigned long)addr, vp->prot);
1819 /* set the dirty bit */
1820 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1821 /* flush the code inside */
1822 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1823 return 1;
1824 #else
1825 return 0;
1826 #endif
1829 #else
1831 void tlb_flush(CPUState *env, int flush_global)
1835 void tlb_flush_page(CPUState *env, target_ulong addr)
1839 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1840 target_phys_addr_t paddr, int prot,
1841 int is_user, int is_softmmu)
1843 return 0;
1846 /* dump memory mappings */
1847 void page_dump(FILE *f)
1849 unsigned long start, end;
1850 int i, j, prot, prot1;
1851 PageDesc *p;
1853 fprintf(f, "%-8s %-8s %-8s %s\n",
1854 "start", "end", "size", "prot");
1855 start = -1;
1856 end = -1;
1857 prot = 0;
1858 for(i = 0; i <= L1_SIZE; i++) {
1859 if (i < L1_SIZE)
1860 p = l1_map[i];
1861 else
1862 p = NULL;
1863 for(j = 0;j < L2_SIZE; j++) {
1864 if (!p)
1865 prot1 = 0;
1866 else
1867 prot1 = p[j].flags;
1868 if (prot1 != prot) {
1869 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1870 if (start != -1) {
1871 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1872 start, end, end - start,
1873 prot & PAGE_READ ? 'r' : '-',
1874 prot & PAGE_WRITE ? 'w' : '-',
1875 prot & PAGE_EXEC ? 'x' : '-');
1877 if (prot1 != 0)
1878 start = end;
1879 else
1880 start = -1;
1881 prot = prot1;
1883 if (!p)
1884 break;
1889 int page_get_flags(target_ulong address)
1891 PageDesc *p;
1893 p = page_find(address >> TARGET_PAGE_BITS);
1894 if (!p)
1895 return 0;
1896 return p->flags;
1899 /* modify the flags of a page and invalidate the code if
1900 necessary. The flag PAGE_WRITE_ORG is set automatically
1901 depending on PAGE_WRITE */
1902 void page_set_flags(target_ulong start, target_ulong end, int flags)
1904 PageDesc *p;
1905 target_ulong addr;
1907 start = start & TARGET_PAGE_MASK;
1908 end = TARGET_PAGE_ALIGN(end);
1909 if (flags & PAGE_WRITE)
1910 flags |= PAGE_WRITE_ORG;
1911 spin_lock(&tb_lock);
1912 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1913 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1914 /* if the write protection is set, then we invalidate the code
1915 inside */
1916 if (!(p->flags & PAGE_WRITE) &&
1917 (flags & PAGE_WRITE) &&
1918 p->first_tb) {
1919 tb_invalidate_phys_page(addr, 0, NULL);
1921 p->flags = flags;
1923 spin_unlock(&tb_lock);
1926 /* called from signal handler: invalidate the code and unprotect the
1927 page. Return TRUE if the fault was successfully handled. */
1928 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1930 unsigned int page_index, prot, pindex;
1931 PageDesc *p, *p1;
1932 target_ulong host_start, host_end, addr;
1934 host_start = address & qemu_host_page_mask;
1935 page_index = host_start >> TARGET_PAGE_BITS;
1936 p1 = page_find(page_index);
1937 if (!p1)
1938 return 0;
1939 host_end = host_start + qemu_host_page_size;
1940 p = p1;
1941 prot = 0;
1942 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1943 prot |= p->flags;
1944 p++;
1946 /* if the page was really writable, then we change its
1947 protection back to writable */
1948 if (prot & PAGE_WRITE_ORG) {
1949 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1950 if (!(p1[pindex].flags & PAGE_WRITE)) {
1951 mprotect((void *)g2h(host_start), qemu_host_page_size,
1952 (prot & PAGE_BITS) | PAGE_WRITE);
1953 p1[pindex].flags |= PAGE_WRITE;
1954 /* and since the content will be modified, we must invalidate
1955 the corresponding translated code. */
1956 tb_invalidate_phys_page(address, pc, puc);
1957 #ifdef DEBUG_TB_CHECK
1958 tb_invalidate_check(address);
1959 #endif
1960 return 1;
1963 return 0;
1966 /* call this function when system calls directly modify a memory area */
1967 /* ??? This should be redundant now that we have lock_user. */
1968 void page_unprotect_range(target_ulong data, target_ulong data_size)
1970 target_ulong start, end, addr;
1972 start = data;
1973 end = start + data_size;
1974 start &= TARGET_PAGE_MASK;
1975 end = TARGET_PAGE_ALIGN(end);
1976 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1977 page_unprotect(addr, 0, NULL);
1981 static inline void tlb_set_dirty(CPUState *env,
1982 unsigned long addr, target_ulong vaddr)
1985 #endif /* defined(CONFIG_USER_ONLY) */
1987 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1988 int memory);
1989 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1990 int orig_memory);
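/* compute the offsets [start_addr2, end_addr2] that the region
   [start_addr, start_addr + orig_size) covers inside the target page at
   'addr', and set 'need_subpage' if only part of the page is covered */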
1991 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1992 need_subpage) \
1993 do { \
1994 if (addr > start_addr) \
1995 start_addr2 = 0; \
1996 else { \
1997 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1998 if (start_addr2 > 0) \
1999 need_subpage = 1; \
2002 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2003 end_addr2 = TARGET_PAGE_SIZE - 1; \
2004 else { \
2005 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2006 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2007 need_subpage = 1; \
2009 } while (0)
2011 /* register physical memory. 'size' must be a multiple of the target
2012 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2013 io memory page */
2014 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2015 unsigned long size,
2016 unsigned long phys_offset)
2018 target_phys_addr_t addr, end_addr;
2019 PhysPageDesc *p;
2020 CPUState *env;
2021 unsigned long orig_size = size;
2022 void *subpage;
2024 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2025 end_addr = start_addr + (target_phys_addr_t)size;
2026 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2028 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2029 unsigned long orig_memory = p->phys_offset;
2030 target_phys_addr_t start_addr2, end_addr2;
2031 int need_subpage = 0;
2033 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2034 need_subpage);
2035 if (need_subpage) {
2036 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2037 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2038 &p->phys_offset, orig_memory);
2039 } else {
2040 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2041 >> IO_MEM_SHIFT];
2043 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2044 } else {
2045 p->phys_offset = phys_offset;
2046 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2047 (phys_offset & IO_MEM_ROMD))
2048 phys_offset += TARGET_PAGE_SIZE;
2050 } else {
2051 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2052 p->phys_offset = phys_offset;
2053 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2054 (phys_offset & IO_MEM_ROMD))
2055 phys_offset += TARGET_PAGE_SIZE;
2056 else {
2057 target_phys_addr_t start_addr2, end_addr2;
2058 int need_subpage = 0;
2060 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2061 end_addr2, need_subpage);
2063 if (need_subpage) {
2064 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2065 &p->phys_offset, IO_MEM_UNASSIGNED);
2066 subpage_register(subpage, start_addr2, end_addr2,
2067 phys_offset);
2073 /* since each CPU stores ram addresses in its TLB cache, we must
2074 reset the modified entries */
2075 /* XXX: slow! */
2076 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2077 tlb_flush(env, 1);
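/* Illustrative usage sketch (the offsets and addresses are hypothetical):
   a board model typically allocates guest memory with qemu_ram_alloc()
   and then maps it, e.g.

       cpu_register_physical_memory(0x00000000, ram_size, ram_offset);
       cpu_register_physical_memory(0xfffe0000, 0x20000,
                                    rom_offset | IO_MEM_ROM);

   An MMIO region is mapped by passing the value returned by
   cpu_register_io_memory() as 'phys_offset'. */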
2081 /* XXX: temporary until new memory mapping API */
2082 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2084 PhysPageDesc *p;
2086 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2087 if (!p)
2088 return IO_MEM_UNASSIGNED;
2089 return p->phys_offset;
2092 /* XXX: better than nothing */
2093 ram_addr_t qemu_ram_alloc(unsigned int size)
2095 ram_addr_t addr;
2096 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2097 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %llu)\n",
2098 size, (unsigned long long)phys_ram_size);
2099 abort();
2101 addr = phys_ram_alloc_offset;
2102 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2103 return addr;
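/* Illustrative usage (the VGA address and size are hypothetical): a device
   that needs backing RAM reserves it once at init time, e.g.

       ram_addr_t vram_offset = qemu_ram_alloc(vga_ram_size);
       cpu_register_physical_memory(0xa0000000, vga_ram_size, vram_offset);

   The returned value is an offset into phys_ram_base, not a host pointer. */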
2106 void qemu_ram_free(ram_addr_t addr)
2110 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2112 #ifdef DEBUG_UNASSIGNED
2113 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
2114 #endif
2115 #ifdef TARGET_SPARC
2116 do_unassigned_access(addr, 0, 0, 0);
2117 #endif
2118 return 0;
2121 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2123 #ifdef DEBUG_UNASSIGNED
2124 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
2125 #endif
2126 #ifdef TARGET_SPARC
2127 do_unassigned_access(addr, 1, 0, 0);
2128 #endif
2131 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2132 unassigned_mem_readb,
2133 unassigned_mem_readb,
2134 unassigned_mem_readb,
2137 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2138 unassigned_mem_writeb,
2139 unassigned_mem_writeb,
2140 unassigned_mem_writeb,
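/* The notdirty handlers below are installed for RAM pages that contain
   translated code. Each RAM page has one dirty byte in phys_ram_dirty;
   a write going through these handlers invalidates the TBs covering the
   written bytes, performs the store, and sets every dirty flag except
   CODE_DIRTY_FLAG. Only once the whole byte reads 0xff (i.e. the code has
   been flushed from the page) is the TLB entry switched back to a plain
   RAM mapping via tlb_set_dirty(). */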
2143 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2145 unsigned long ram_addr;
2146 int dirty_flags;
2147 ram_addr = addr - (unsigned long)phys_ram_base;
2148 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2149 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2150 #if !defined(CONFIG_USER_ONLY)
2151 tb_invalidate_phys_page_fast(ram_addr, 1);
2152 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2153 #endif
2155 stb_p((uint8_t *)(long)addr, val);
2156 #ifdef USE_KQEMU
2157 if (cpu_single_env->kqemu_enabled &&
2158 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2159 kqemu_modify_page(cpu_single_env, ram_addr);
2160 #endif
2161 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2162 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2163 /* we remove the notdirty callback only if the code has been
2164 flushed */
2165 if (dirty_flags == 0xff)
2166 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2169 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2171 unsigned long ram_addr;
2172 int dirty_flags;
2173 ram_addr = addr - (unsigned long)phys_ram_base;
2174 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2175 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2176 #if !defined(CONFIG_USER_ONLY)
2177 tb_invalidate_phys_page_fast(ram_addr, 2);
2178 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2179 #endif
2181 stw_p((uint8_t *)(long)addr, val);
2182 #ifdef USE_KQEMU
2183 if (cpu_single_env->kqemu_enabled &&
2184 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2185 kqemu_modify_page(cpu_single_env, ram_addr);
2186 #endif
2187 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2188 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2189 /* we remove the notdirty callback only if the code has been
2190 flushed */
2191 if (dirty_flags == 0xff)
2192 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2195 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2197 unsigned long ram_addr;
2198 int dirty_flags;
2199 ram_addr = addr - (unsigned long)phys_ram_base;
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2202 #if !defined(CONFIG_USER_ONLY)
2203 tb_invalidate_phys_page_fast(ram_addr, 4);
2204 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2205 #endif
2207 stl_p((uint8_t *)(long)addr, val);
2208 #ifdef USE_KQEMU
2209 if (cpu_single_env->kqemu_enabled &&
2210 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2211 kqemu_modify_page(cpu_single_env, ram_addr);
2212 #endif
2213 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2214 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2215 /* we remove the notdirty callback only if the code has been
2216 flushed */
2217 if (dirty_flags == 0xff)
2218 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2221 static CPUReadMemoryFunc *error_mem_read[3] = {
2222 NULL, /* never used */
2223 NULL, /* never used */
2224 NULL, /* never used */
2227 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2228 notdirty_mem_writeb,
2229 notdirty_mem_writew,
2230 notdirty_mem_writel,
2233 #if defined(CONFIG_SOFTMMU)
2234 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2235 so these check for a hit then pass through to the normal out-of-line
2236 phys routines. */
2237 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2239 return ldub_phys(addr);
2242 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2244 return lduw_phys(addr);
2247 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2249 return ldl_phys(addr);
2252 /* Generate a debug exception if a watchpoint has been hit.
2253 Returns the real physical address of the access. addr will be a host
2254 address in case of a RAM location. */
2255 static target_ulong check_watchpoint(target_phys_addr_t addr)
2257 CPUState *env = cpu_single_env;
2258 target_ulong watch;
2259 target_ulong retaddr;
2260 int i;
2262 retaddr = addr;
2263 for (i = 0; i < env->nb_watchpoints; i++) {
2264 watch = env->watchpoint[i].vaddr;
2265 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2266 retaddr = addr - env->watchpoint[i].addend;
2267 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2268 cpu_single_env->watchpoint_hit = i + 1;
2269 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2270 break;
2274 return retaddr;
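/* When a watchpoint is set, the TLB entry for the watched page is made to
   point at io_mem_watch (registered in io_mem_init() below), so every load
   and store to that page goes through the watch_mem_* handlers; on a hit,
   check_watchpoint() records the watchpoint index and raises
   CPU_INTERRUPT_DEBUG. */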
2277 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2278 uint32_t val)
2280 addr = check_watchpoint(addr);
2281 stb_phys(addr, val);
2284 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2285 uint32_t val)
2287 addr = check_watchpoint(addr);
2288 stw_phys(addr, val);
2291 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2292 uint32_t val)
2294 addr = check_watchpoint(addr);
2295 stl_phys(addr, val);
2298 static CPUReadMemoryFunc *watch_mem_read[3] = {
2299 watch_mem_readb,
2300 watch_mem_readw,
2301 watch_mem_readl,
2304 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2305 watch_mem_writeb,
2306 watch_mem_writew,
2307 watch_mem_writel,
2309 #endif
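/* Subpage support: when several memory regions share a single target page
   (see CHECK_SUBPAGE above), the page is backed by a subpage_t whose
   mem_read/mem_write/opaque tables are indexed by SUBPAGE_IDX(offset), so
   each chunk of the page can dispatch to a different io memory handler. */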
2311 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2312 unsigned int len)
2314 CPUReadMemoryFunc **mem_read;
2315 uint32_t ret;
2316 unsigned int idx;
2318 idx = SUBPAGE_IDX(addr - mmio->base);
2319 #if defined(DEBUG_SUBPAGE)
2320 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2321 mmio, len, addr, idx);
2322 #endif
2323 mem_read = mmio->mem_read[idx];
2324 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2326 return ret;
2329 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2330 uint32_t value, unsigned int len)
2332 CPUWriteMemoryFunc **mem_write;
2333 unsigned int idx;
2335 idx = SUBPAGE_IDX(addr - mmio->base);
2336 #if defined(DEBUG_SUBPAGE)
2337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2338 mmio, len, addr, idx, value);
2339 #endif
2340 mem_write = mmio->mem_write[idx];
2341 (*mem_write[len])(mmio->opaque[idx], addr, value);
2344 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2346 #if defined(DEBUG_SUBPAGE)
2347 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2348 #endif
2350 return subpage_readlen(opaque, addr, 0);
2353 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2354 uint32_t value)
2356 #if defined(DEBUG_SUBPAGE)
2357 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2358 #endif
2359 subpage_writelen(opaque, addr, value, 0);
2362 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2366 #endif
2368 return subpage_readlen(opaque, addr, 1);
2371 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2372 uint32_t value)
2374 #if defined(DEBUG_SUBPAGE)
2375 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2376 #endif
2377 subpage_writelen(opaque, addr, value, 1);
2380 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2382 #if defined(DEBUG_SUBPAGE)
2383 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2384 #endif
2386 return subpage_readlen(opaque, addr, 2);
2389 static void subpage_writel (void *opaque,
2390 target_phys_addr_t addr, uint32_t value)
2392 #if defined(DEBUG_SUBPAGE)
2393 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2394 #endif
2395 subpage_writelen(opaque, addr, value, 2);
2398 static CPUReadMemoryFunc *subpage_read[] = {
2399 &subpage_readb,
2400 &subpage_readw,
2401 &subpage_readl,
2404 static CPUWriteMemoryFunc *subpage_write[] = {
2405 &subpage_writeb,
2406 &subpage_writew,
2407 &subpage_writel,
2410 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2411 int memory)
2413 int idx, eidx;
2415 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2416 return -1;
2417 idx = SUBPAGE_IDX(start);
2418 eidx = SUBPAGE_IDX(end);
2419 #if defined(DEBUG_SUBPAGE)
2420 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2421 mmio, start, end, idx, eidx, memory);
2422 #endif
2423 memory >>= IO_MEM_SHIFT;
2424 for (; idx <= eidx; idx++) {
2425 mmio->mem_read[idx] = io_mem_read[memory];
2426 mmio->mem_write[idx] = io_mem_write[memory];
2427 mmio->opaque[idx] = io_mem_opaque[memory];
2430 return 0;
2433 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2434 int orig_memory)
2436 subpage_t *mmio;
2437 int subpage_memory;
2439 mmio = qemu_mallocz(sizeof(subpage_t));
2440 if (mmio != NULL) {
2441 mmio->base = base;
2442 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2443 #if defined(DEBUG_SUBPAGE)
2444 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2445 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2446 #endif
2447 *phys = subpage_memory | IO_MEM_SUBPAGE;
2448 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2451 return mmio;
2454 static void io_mem_init(void)
2456 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2457 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2458 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2459 io_mem_nb = 5;
2461 #if defined(CONFIG_SOFTMMU)
2462 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2463 watch_mem_write, NULL);
2464 #endif
2465 /* alloc dirty bits array */
2466 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2467 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2470 /* mem_read and mem_write are arrays of functions used to access the
2471 io zone as bytes (index 0), words (index 1) and dwords (index 2).
2472 All functions must be supplied. If io_index is zero or negative, a
2473 new io zone is allocated; otherwise the io zone with that index is
2474 modified. The return value can be used with
2475 cpu_register_physical_memory(). -1 is returned on error. */
2476 int cpu_register_io_memory(int io_index,
2477 CPUReadMemoryFunc **mem_read,
2478 CPUWriteMemoryFunc **mem_write,
2479 void *opaque)
2481 int i;
2483 if (io_index <= 0) {
2484 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2485 return -1;
2486 io_index = io_mem_nb++;
2487 } else {
2488 if (io_index >= IO_MEM_NB_ENTRIES)
2489 return -1;
2492 for(i = 0;i < 3; i++) {
2493 io_mem_read[io_index][i] = mem_read[i];
2494 io_mem_write[io_index][i] = mem_write[i];
2496 io_mem_opaque[io_index] = opaque;
2497 return io_index << IO_MEM_SHIFT;
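/* Illustrative usage (the 'mydev_*' handlers and the opaque pointer 's'
   are hypothetical): a device supplies one handler per access size and
   maps the returned io index into the physical address space:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
*/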
2500 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2502 return io_mem_write[io_index >> IO_MEM_SHIFT];
2505 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2507 return io_mem_read[io_index >> IO_MEM_SHIFT];
2510 /* physical memory access (slow version, mainly for debug) */
2511 #if defined(CONFIG_USER_ONLY)
2512 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2513 int len, int is_write)
2515 int l, flags;
2516 target_ulong page;
2517 void * p;
2519 while (len > 0) {
2520 page = addr & TARGET_PAGE_MASK;
2521 l = (page + TARGET_PAGE_SIZE) - addr;
2522 if (l > len)
2523 l = len;
2524 flags = page_get_flags(page);
2525 if (!(flags & PAGE_VALID))
2526 return;
2527 if (is_write) {
2528 if (!(flags & PAGE_WRITE))
2529 return;
2530 p = lock_user(addr, len, 0);
2531 memcpy(p, buf, len);
2532 unlock_user(p, addr, len);
2533 } else {
2534 if (!(flags & PAGE_READ))
2535 return;
2536 p = lock_user(addr, len, 1);
2537 memcpy(buf, p, len);
2538 unlock_user(p, addr, 0);
2540 len -= l;
2541 buf += l;
2542 addr += l;
2546 #else
2547 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2548 int len, int is_write)
2550 int l, io_index;
2551 uint8_t *ptr;
2552 uint32_t val;
2553 target_phys_addr_t page;
2554 unsigned long pd;
2555 PhysPageDesc *p;
2557 while (len > 0) {
2558 page = addr & TARGET_PAGE_MASK;
2559 l = (page + TARGET_PAGE_SIZE) - addr;
2560 if (l > len)
2561 l = len;
2562 p = phys_page_find(page >> TARGET_PAGE_BITS);
2563 if (!p) {
2564 pd = IO_MEM_UNASSIGNED;
2565 } else {
2566 pd = p->phys_offset;
2569 if (is_write) {
2570 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2572 /* XXX: could force cpu_single_env to NULL to avoid
2573 potential bugs */
2574 if (l >= 4 && ((addr & 3) == 0)) {
2575 /* 32 bit write access */
2576 val = ldl_p(buf);
2577 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2578 l = 4;
2579 } else if (l >= 2 && ((addr & 1) == 0)) {
2580 /* 16 bit write access */
2581 val = lduw_p(buf);
2582 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2583 l = 2;
2584 } else {
2585 /* 8 bit write access */
2586 val = ldub_p(buf);
2587 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2588 l = 1;
2590 } else {
2591 unsigned long addr1;
2592 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2593 /* RAM case */
2594 ptr = phys_ram_base + addr1;
2595 memcpy(ptr, buf, l);
2596 if (!cpu_physical_memory_is_dirty(addr1)) {
2597 /* invalidate code */
2598 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2599 /* set dirty bit */
2600 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2601 (0xff & ~CODE_DIRTY_FLAG);
2604 } else {
2605 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2606 !(pd & IO_MEM_ROMD)) {
2607 /* I/O case */
2608 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2609 if (l >= 4 && ((addr & 3) == 0)) {
2610 /* 32 bit read access */
2611 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2612 stl_p(buf, val);
2613 l = 4;
2614 } else if (l >= 2 && ((addr & 1) == 0)) {
2615 /* 16 bit read access */
2616 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2617 stw_p(buf, val);
2618 l = 2;
2619 } else {
2620 /* 8 bit read access */
2621 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2622 stb_p(buf, val);
2623 l = 1;
2625 } else {
2626 /* RAM case */
2627 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2628 (addr & ~TARGET_PAGE_MASK);
2629 memcpy(buf, ptr, l);
2632 len -= l;
2633 buf += l;
2634 addr += l;
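/* Illustrative usage: device models usually go through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers for
   DMA-style transfers ('desc_addr' and 'desc' are hypothetical):

       uint8_t desc[16];
       cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
*/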
2638 /* used for ROM loading: can write in RAM and ROM */
2639 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2640 const uint8_t *buf, int len)
2642 int l;
2643 uint8_t *ptr;
2644 target_phys_addr_t page;
2645 unsigned long pd;
2646 PhysPageDesc *p;
2648 while (len > 0) {
2649 page = addr & TARGET_PAGE_MASK;
2650 l = (page + TARGET_PAGE_SIZE) - addr;
2651 if (l > len)
2652 l = len;
2653 p = phys_page_find(page >> TARGET_PAGE_BITS);
2654 if (!p) {
2655 pd = IO_MEM_UNASSIGNED;
2656 } else {
2657 pd = p->phys_offset;
2660 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2661 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2662 !(pd & IO_MEM_ROMD)) {
2663 /* do nothing */
2664 } else {
2665 unsigned long addr1;
2666 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2667 /* ROM/RAM case */
2668 ptr = phys_ram_base + addr1;
2669 memcpy(ptr, buf, l);
2671 len -= l;
2672 buf += l;
2673 addr += l;
2678 /* warning: addr must be aligned */
2679 uint32_t ldl_phys(target_phys_addr_t addr)
2681 int io_index;
2682 uint8_t *ptr;
2683 uint32_t val;
2684 unsigned long pd;
2685 PhysPageDesc *p;
2687 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2688 if (!p) {
2689 pd = IO_MEM_UNASSIGNED;
2690 } else {
2691 pd = p->phys_offset;
2694 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2695 !(pd & IO_MEM_ROMD)) {
2696 /* I/O case */
2697 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2698 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2699 } else {
2700 /* RAM case */
2701 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2702 (addr & ~TARGET_PAGE_MASK);
2703 val = ldl_p(ptr);
2705 return val;
2708 /* warning: addr must be aligned */
2709 uint64_t ldq_phys(target_phys_addr_t addr)
2711 int io_index;
2712 uint8_t *ptr;
2713 uint64_t val;
2714 unsigned long pd;
2715 PhysPageDesc *p;
2717 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2718 if (!p) {
2719 pd = IO_MEM_UNASSIGNED;
2720 } else {
2721 pd = p->phys_offset;
2724 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2725 !(pd & IO_MEM_ROMD)) {
2726 /* I/O case */
2727 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2728 #ifdef TARGET_WORDS_BIGENDIAN
2729 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2730 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2731 #else
2732 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2733 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2734 #endif
2735 } else {
2736 /* RAM case */
2737 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2738 (addr & ~TARGET_PAGE_MASK);
2739 val = ldq_p(ptr);
2741 return val;
2744 /* XXX: optimize */
2745 uint32_t ldub_phys(target_phys_addr_t addr)
2747 uint8_t val;
2748 cpu_physical_memory_read(addr, &val, 1);
2749 return val;
2752 /* XXX: optimize */
2753 uint32_t lduw_phys(target_phys_addr_t addr)
2755 uint16_t val;
2756 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2757 return tswap16(val);
2760 #ifdef __GNUC__
2761 #define likely(x) __builtin_expect(!!(x), 1)
2762 #define unlikely(x) __builtin_expect(!!(x), 0)
2763 #else
2764 #define likely(x) x
2765 #define unlikely(x) x
2766 #endif
2768 /* warning: addr must be aligned. The ram page is not marked as dirty
2769 and the code inside is not invalidated. This is useful when the dirty
2770 bits are used to track modified PTEs */
2771 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2773 int io_index;
2774 uint8_t *ptr;
2775 unsigned long pd;
2776 PhysPageDesc *p;
2778 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2779 if (!p) {
2780 pd = IO_MEM_UNASSIGNED;
2781 } else {
2782 pd = p->phys_offset;
2785 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2786 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2787 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2788 } else {
2789 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2790 ptr = phys_ram_base + addr1;
2791 stl_p(ptr, val);
2793 if (unlikely(in_migration)) {
2794 if (!cpu_physical_memory_is_dirty(addr1)) {
2795 /* invalidate code */
2796 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2797 /* set dirty bit */
2798 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2799 (0xff & ~CODE_DIRTY_FLAG);
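/* Illustrative usage (all names in the snippet are hypothetical): target
   MMU helpers use the _notdirty variants when updating accessed/dirty bits
   inside a guest page table entry, so the bookkeeping store does not itself
   dirty the page being tracked:

       pte |= PTE_DIRTY_BIT;
       stl_phys_notdirty(pte_addr, pte);
*/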
2805 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2807 int io_index;
2808 uint8_t *ptr;
2809 unsigned long pd;
2810 PhysPageDesc *p;
2812 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2813 if (!p) {
2814 pd = IO_MEM_UNASSIGNED;
2815 } else {
2816 pd = p->phys_offset;
2819 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2820 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2821 #ifdef TARGET_WORDS_BIGENDIAN
2822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2823 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2824 #else
2825 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2827 #endif
2828 } else {
2829 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2830 (addr & ~TARGET_PAGE_MASK);
2831 stq_p(ptr, val);
2835 /* warning: addr must be aligned */
2836 void stl_phys(target_phys_addr_t addr, uint32_t val)
2838 int io_index;
2839 uint8_t *ptr;
2840 unsigned long pd;
2841 PhysPageDesc *p;
2843 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2844 if (!p) {
2845 pd = IO_MEM_UNASSIGNED;
2846 } else {
2847 pd = p->phys_offset;
2850 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2851 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2852 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2853 } else {
2854 unsigned long addr1;
2855 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2856 /* RAM case */
2857 ptr = phys_ram_base + addr1;
2858 stl_p(ptr, val);
2859 if (!cpu_physical_memory_is_dirty(addr1)) {
2860 /* invalidate code */
2861 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2862 /* set dirty bit */
2863 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2864 (0xff & ~CODE_DIRTY_FLAG);
2869 /* XXX: optimize */
2870 void stb_phys(target_phys_addr_t addr, uint32_t val)
2872 uint8_t v = val;
2873 cpu_physical_memory_write(addr, &v, 1);
2876 /* XXX: optimize */
2877 void stw_phys(target_phys_addr_t addr, uint32_t val)
2879 uint16_t v = tswap16(val);
2880 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2883 /* XXX: optimize */
2884 void stq_phys(target_phys_addr_t addr, uint64_t val)
2886 val = tswap64(val);
2887 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2890 #endif
2892 /* virtual memory access for debug */
2893 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2894 uint8_t *buf, int len, int is_write)
2896 int l;
2897 target_phys_addr_t phys_addr;
2898 target_ulong page;
2900 while (len > 0) {
2901 page = addr & TARGET_PAGE_MASK;
2902 phys_addr = cpu_get_phys_page_debug(env, page);
2903 /* if no physical page is mapped, return an error */
2904 if (phys_addr == -1)
2905 return -1;
2906 l = (page + TARGET_PAGE_SIZE) - addr;
2907 if (l > len)
2908 l = len;
2909 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2910 buf, l, is_write);
2911 len -= l;
2912 buf += l;
2913 addr += l;
2915 return 0;
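/* Illustrative usage: the gdb stub and the monitor read guest virtual
   memory through this helper. For example, fetching 4 bytes at a guest
   virtual address 'pc' (hypothetical):

       uint32_t insn;
       int err = cpu_memory_rw_debug(env, pc, (uint8_t *)&insn,
                                     sizeof(insn), 0);

   A return value of -1 means no physical page was mapped at 'pc'. */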
2918 void dump_exec_info(FILE *f,
2919 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2921 int i, target_code_size, max_target_code_size;
2922 int direct_jmp_count, direct_jmp2_count, cross_page;
2923 TranslationBlock *tb;
2925 target_code_size = 0;
2926 max_target_code_size = 0;
2927 cross_page = 0;
2928 direct_jmp_count = 0;
2929 direct_jmp2_count = 0;
2930 for(i = 0; i < nb_tbs; i++) {
2931 tb = &tbs[i];
2932 target_code_size += tb->size;
2933 if (tb->size > max_target_code_size)
2934 max_target_code_size = tb->size;
2935 if (tb->page_addr[1] != -1)
2936 cross_page++;
2937 if (tb->tb_next_offset[0] != 0xffff) {
2938 direct_jmp_count++;
2939 if (tb->tb_next_offset[1] != 0xffff) {
2940 direct_jmp2_count++;
2944 /* XXX: avoid using doubles ? */
2945 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2946 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2947 nb_tbs ? target_code_size / nb_tbs : 0,
2948 max_target_code_size);
2949 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2950 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2951 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2952 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2953 cross_page,
2954 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2955 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2956 direct_jmp_count,
2957 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2958 direct_jmp2_count,
2959 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2960 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2961 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2962 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2965 #if !defined(CONFIG_USER_ONLY)
2967 #define MMUSUFFIX _cmmu
2968 #define GETPC() NULL
2969 #define env cpu_single_env
2970 #define SOFTMMU_CODE_ACCESS
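/* The template below is instantiated once per access size; SHIFT is the
   log2 of the access size in bytes (0 = byte, 1 = 16-bit, 2 = 32-bit,
   3 = 64-bit). Together with MMUSUFFIX set to _cmmu and
   SOFTMMU_CODE_ACCESS, it generates the slow-path accessors used for
   code fetches. */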
2972 #define SHIFT 0
2973 #include "softmmu_template.h"
2975 #define SHIFT 1
2976 #include "softmmu_template.h"
2978 #define SHIFT 2
2979 #include "softmmu_template.h"
2981 #define SHIFT 3
2982 #include "softmmu_template.h"
2984 #undef env
2986 #endif