[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
39 #if !defined(NO_CPU_EMULATION)
40 #include "tcg-target.h"
41 #endif
43 #include "qemu-kvm.h"
44 #include "qemu-common.h"
46 #if defined(CONFIG_USER_ONLY)
47 #include <qemu.h>
48 #endif
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
67 /* threshold to flush the translated code buffer */
68 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
70 #define SMC_BITMAP_USE_THRESHOLD 10
72 #define MMAP_AREA_START 0x00000000
73 #define MMAP_AREA_END 0xa8000000
75 #if defined(TARGET_SPARC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 41
77 #elif defined(TARGET_SPARC)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #elif defined(TARGET_ALPHA)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #define TARGET_VIRT_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_PPC64)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #elif USE_KQEMU
85 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
86 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 #elif TARGET_X86_64
88 #define TARGET_PHYS_ADDR_SPACE_BITS 42
89 #elif defined(TARGET_IA64)
90 #define TARGET_PHYS_ADDR_SPACE_BITS 36
91 #else
92 #define TARGET_PHYS_ADDR_SPACE_BITS 32
93 #endif
95 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
96 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
97 int nb_tbs;
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
101 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
102 uint8_t *code_gen_ptr;
104 ram_addr_t phys_ram_size;
105 int phys_ram_fd;
106 uint8_t *phys_ram_base;
107 uint8_t *phys_ram_dirty;
108 uint8_t *bios_mem;
109 static int in_migration;
110 static ram_addr_t phys_ram_alloc_offset = 0;
112 CPUState *first_cpu;
113 /* current CPU in the current thread. It is only valid inside
114 cpu_exec() */
115 CPUState *cpu_single_env;
117 typedef struct PageDesc {
118 /* list of TBs intersecting this ram page */
119 TranslationBlock *first_tb;
120 /* in order to optimize self-modifying code handling, we count the number
121 of code write accesses to a given page and switch to a bitmap past a threshold */
122 unsigned int code_write_count;
123 uint8_t *code_bitmap;
124 #if defined(CONFIG_USER_ONLY)
125 unsigned long flags;
126 #endif
127 } PageDesc;
129 typedef struct PhysPageDesc {
130 /* offset in host memory of the page + io_index in the low 12 bits */
131 ram_addr_t phys_offset;
132 } PhysPageDesc;
134 #define L2_BITS 10
135 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
136 /* XXX: this is a temporary hack for alpha target.
137 * In the future, this is to be replaced by a multi-level table
138 * to actually be able to handle the complete 64-bit address space.
140 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
141 #else
142 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
143 #endif
145 #define L1_SIZE (1 << L1_BITS)
146 #define L2_SIZE (1 << L2_BITS)
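/* Worked example of the two-level split (assuming a 4 KiB target page,
   i.e. TARGET_PAGE_BITS == 12; the exact widths are target dependent):
   L1_BITS = 32 - 10 - 12 = 10, so both levels hold 1024 entries.
   For a 32-bit address 'addr':
       page index = addr >> TARGET_PAGE_BITS        (top 20 bits)
       L1 slot    = index >> L2_BITS                (bits 31..22 of addr)
       L2 slot    = index & (L2_SIZE - 1)           (bits 21..12 of addr)
   page_find()/page_find_alloc() below apply exactly this split; the
   alpha user-mode case above just widens L1_BITS via
   TARGET_VIRT_ADDR_SPACE_BITS. */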
148 static void io_mem_init(void);
150 unsigned long qemu_real_host_page_size;
151 unsigned long qemu_host_page_bits;
152 unsigned long qemu_host_page_size;
153 unsigned long qemu_host_page_mask;
155 /* XXX: for system emulation, it could just be an array */
156 static PageDesc *l1_map[L1_SIZE];
157 PhysPageDesc **l1_phys_map;
159 /* io memory support */
160 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
161 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
162 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
163 char io_mem_used[IO_MEM_NB_ENTRIES];
164 #if defined(CONFIG_SOFTMMU)
165 static int io_mem_watch;
166 #endif
168 /* log support */
169 char *logfilename = "/tmp/qemu.log";
170 FILE *logfile;
171 int loglevel;
172 static int log_append = 0;
174 /* statistics */
175 static int tlb_flush_count;
176 static int tb_flush_count;
177 static int tb_phys_invalidate_count;
179 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
180 typedef struct subpage_t {
181 target_phys_addr_t base;
182 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
183 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
184 void *opaque[TARGET_PAGE_SIZE][2][4];
185 } subpage_t;
187 static void page_init(void)
189 /* NOTE: we can always suppose that qemu_host_page_size >=
190 TARGET_PAGE_SIZE */
191 #ifdef _WIN32
193 SYSTEM_INFO system_info;
194 DWORD old_protect;
196 GetSystemInfo(&system_info);
197 qemu_real_host_page_size = system_info.dwPageSize;
199 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
200 PAGE_EXECUTE_READWRITE, &old_protect);
202 #else
203 qemu_real_host_page_size = getpagesize();
205 unsigned long start, end;
207 start = (unsigned long)code_gen_buffer;
208 start &= ~(qemu_real_host_page_size - 1);
210 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
211 end += qemu_real_host_page_size - 1;
212 end &= ~(qemu_real_host_page_size - 1);
214 mprotect((void *)start, end - start,
215 PROT_READ | PROT_WRITE | PROT_EXEC);
217 #endif
219 if (qemu_host_page_size == 0)
220 qemu_host_page_size = qemu_real_host_page_size;
221 if (qemu_host_page_size < TARGET_PAGE_SIZE)
222 qemu_host_page_size = TARGET_PAGE_SIZE;
223 qemu_host_page_bits = 0;
224 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
225 qemu_host_page_bits++;
226 qemu_host_page_mask = ~(qemu_host_page_size - 1);
227 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
228 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 long long startaddr, endaddr;
233 FILE *f;
234 int n;
236 f = fopen("/proc/self/maps", "r");
237 if (f) {
238 do {
239 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
240 if (n == 2) {
241 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
242 TARGET_PAGE_ALIGN(endaddr),
243 PAGE_RESERVED);
245 } while (!feof(f));
246 fclose(f);
249 #endif
252 static inline PageDesc *page_find_alloc(unsigned int index)
254 PageDesc **lp, *p;
256 lp = &l1_map[index >> L2_BITS];
257 p = *lp;
258 if (!p) {
259 /* allocate if not found */
260 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
261 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
262 *lp = p;
264 return p + (index & (L2_SIZE - 1));
267 static inline PageDesc *page_find(unsigned int index)
269 PageDesc *p;
271 p = l1_map[index >> L2_BITS];
272 if (!p)
273 return NULL;
274 return p + (index & (L2_SIZE - 1));
277 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
279 void **lp, **p;
280 PhysPageDesc *pd;
282 p = (void **)l1_phys_map;
283 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
285 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
286 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
287 #endif
288 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
289 p = *lp;
290 if (!p) {
291 /* allocate if not found */
292 if (!alloc)
293 return NULL;
294 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
295 memset(p, 0, sizeof(void *) * L1_SIZE);
296 *lp = p;
298 #endif
299 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
300 pd = *lp;
301 if (!pd) {
302 int i;
303 /* allocate if not found */
304 if (!alloc)
305 return NULL;
306 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
307 *lp = pd;
308 for (i = 0; i < L2_SIZE; i++)
309 pd[i].phys_offset = IO_MEM_UNASSIGNED;
311 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
314 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
316 return phys_page_find_alloc(index, 0);
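#if 0
/* Minimal usage sketch (the helper name is illustrative): this is the
   lookup pattern used by breakpoint_invalidate() and tlb_set_page_exec()
   below -- fetch the descriptor for a physical address and fall back to
   IO_MEM_UNASSIGNED when no page is registered there. */
static ram_addr_t example_phys_offset(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : IO_MEM_UNASSIGNED;
}
#endif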
319 #if !defined(CONFIG_USER_ONLY)
320 static void tlb_protect_code(ram_addr_t ram_addr);
321 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
322 target_ulong vaddr);
323 #endif
325 void cpu_exec_init(CPUState *env)
327 CPUState **penv;
328 int cpu_index;
330 if (!code_gen_ptr) {
331 cpu_gen_init();
332 code_gen_ptr = code_gen_buffer;
333 page_init();
334 io_mem_init();
336 env->next_cpu = NULL;
337 penv = &first_cpu;
338 cpu_index = 0;
339 while (*penv != NULL) {
340 penv = (CPUState **)&(*penv)->next_cpu;
341 cpu_index++;
343 env->cpu_index = cpu_index;
344 env->nb_watchpoints = 0;
345 #ifdef _WIN32
346 env->thread_id = GetCurrentProcessId();
347 #else
348 env->thread_id = getpid();
349 #endif
350 *penv = env;
353 static inline void invalidate_page_bitmap(PageDesc *p)
355 if (p->code_bitmap) {
356 qemu_free(p->code_bitmap);
357 p->code_bitmap = NULL;
359 p->code_write_count = 0;
362 /* set to NULL all the 'first_tb' fields in all PageDescs */
363 static void page_flush_tb(void)
365 int i, j;
366 PageDesc *p;
368 for(i = 0; i < L1_SIZE; i++) {
369 p = l1_map[i];
370 if (p) {
371 for(j = 0; j < L2_SIZE; j++) {
372 p->first_tb = NULL;
373 invalidate_page_bitmap(p);
374 p++;
380 /* flush all the translation blocks */
381 /* XXX: tb_flush is currently not thread safe */
382 void tb_flush(CPUState *env1)
384 CPUState *env;
385 #if defined(DEBUG_FLUSH)
386 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
387 (unsigned long)(code_gen_ptr - code_gen_buffer),
388 nb_tbs, nb_tbs > 0 ?
389 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
390 #endif
391 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
392 cpu_abort(env1, "Internal error: code buffer overflow\n");
394 nb_tbs = 0;
396 for(env = first_cpu; env != NULL; env = env->next_cpu) {
397 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
400 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
401 page_flush_tb();
403 code_gen_ptr = code_gen_buffer;
404 /* XXX: flush processor icache at this point if cache flush is
405 expensive */
406 tb_flush_count++;
409 #ifdef DEBUG_TB_CHECK
411 static void tb_invalidate_check(target_ulong address)
413 TranslationBlock *tb;
414 int i;
415 address &= TARGET_PAGE_MASK;
416 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
417 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
418 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
419 address >= tb->pc + tb->size)) {
420 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
421 address, (long)tb->pc, tb->size);
427 /* verify that all the pages have correct rights for code */
428 static void tb_page_check(void)
430 TranslationBlock *tb;
431 int i, flags1, flags2;
433 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
434 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
435 flags1 = page_get_flags(tb->pc);
436 flags2 = page_get_flags(tb->pc + tb->size - 1);
437 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
438 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
439 (long)tb->pc, tb->size, flags1, flags2);
445 void tb_jmp_check(TranslationBlock *tb)
447 TranslationBlock *tb1;
448 unsigned int n1;
450 /* suppress any remaining jumps to this TB */
451 tb1 = tb->jmp_first;
452 for(;;) {
453 n1 = (long)tb1 & 3;
454 tb1 = (TranslationBlock *)((long)tb1 & ~3);
455 if (n1 == 2)
456 break;
457 tb1 = tb1->jmp_next[n1];
459 /* check end of list */
460 if (tb1 != tb) {
461 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
465 #endif
467 /* invalidate one TB */
468 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
469 int next_offset)
471 TranslationBlock *tb1;
472 for(;;) {
473 tb1 = *ptb;
474 if (tb1 == tb) {
475 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
476 break;
478 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
482 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
484 TranslationBlock *tb1;
485 unsigned int n1;
487 for(;;) {
488 tb1 = *ptb;
489 n1 = (long)tb1 & 3;
490 tb1 = (TranslationBlock *)((long)tb1 & ~3);
491 if (tb1 == tb) {
492 *ptb = tb1->page_next[n1];
493 break;
495 ptb = &tb1->page_next[n1];
499 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
501 TranslationBlock *tb1, **ptb;
502 unsigned int n1;
504 ptb = &tb->jmp_next[n];
505 tb1 = *ptb;
506 if (tb1) {
507 /* find tb(n) in circular list */
508 for(;;) {
509 tb1 = *ptb;
510 n1 = (long)tb1 & 3;
511 tb1 = (TranslationBlock *)((long)tb1 & ~3);
512 if (n1 == n && tb1 == tb)
513 break;
514 if (n1 == 2) {
515 ptb = &tb1->jmp_first;
516 } else {
517 ptb = &tb1->jmp_next[n1];
520 /* now we can suppress tb(n) from the list */
521 *ptb = tb->jmp_next[n];
523 tb->jmp_next[n] = NULL;
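/* Illustration of the pointer tagging used by the jump lists above: the
   two low bits of a stored TranslationBlock pointer select which
   jmp_next[] slot of the pointed-to TB continues the list (0 or 1), and
   the value 2 marks the end of the circular list, i.e. the owning TB
   itself (see tb->jmp_first = tb | 2 in tb_link_phys() below).  Decoding
   a link therefore always looks like:
       n1  = (long)tb1 & 3;                          slot index or end marker
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   real pointer
   This is safe because TranslationBlock structures are more than 4-byte
   aligned, so the low bits of a valid pointer are always zero. */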
527 /* reset the jump entry 'n' of a TB so that it is not chained to
528 another TB */
529 static inline void tb_reset_jump(TranslationBlock *tb, int n)
531 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
534 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
536 CPUState *env;
537 PageDesc *p;
538 unsigned int h, n1;
539 target_ulong phys_pc;
540 TranslationBlock *tb1, *tb2;
542 /* remove the TB from the hash list */
543 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
544 h = tb_phys_hash_func(phys_pc);
545 tb_remove(&tb_phys_hash[h], tb,
546 offsetof(TranslationBlock, phys_hash_next));
548 /* remove the TB from the page list */
549 if (tb->page_addr[0] != page_addr) {
550 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
551 tb_page_remove(&p->first_tb, tb);
552 invalidate_page_bitmap(p);
554 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
555 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
556 tb_page_remove(&p->first_tb, tb);
557 invalidate_page_bitmap(p);
560 tb_invalidated_flag = 1;
562 /* remove the TB from the hash list */
563 h = tb_jmp_cache_hash_func(tb->pc);
564 for(env = first_cpu; env != NULL; env = env->next_cpu) {
565 if (env->tb_jmp_cache[h] == tb)
566 env->tb_jmp_cache[h] = NULL;
569 /* suppress this TB from the two jump lists */
570 tb_jmp_remove(tb, 0);
571 tb_jmp_remove(tb, 1);
573 /* suppress any remaining jumps to this TB */
574 tb1 = tb->jmp_first;
575 for(;;) {
576 n1 = (long)tb1 & 3;
577 if (n1 == 2)
578 break;
579 tb1 = (TranslationBlock *)((long)tb1 & ~3);
580 tb2 = tb1->jmp_next[n1];
581 tb_reset_jump(tb1, n1);
582 tb1->jmp_next[n1] = NULL;
583 tb1 = tb2;
585 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
587 tb_phys_invalidate_count++;
590 static inline void set_bits(uint8_t *tab, int start, int len)
592 int end, mask, end1;
594 end = start + len;
595 tab += start >> 3;
596 mask = 0xff << (start & 7);
597 if ((start & ~7) == (end & ~7)) {
598 if (start < end) {
599 mask &= ~(0xff << (end & 7));
600 *tab |= mask;
602 } else {
603 *tab++ |= mask;
604 start = (start + 8) & ~7;
605 end1 = end & ~7;
606 while (start < end1) {
607 *tab++ = 0xff;
608 start += 8;
610 if (start < end) {
611 mask = ~(0xff << (end & 7));
612 *tab |= mask;
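/* Worked example for set_bits(): bit i of the bitmap lives in
   tab[i >> 3] at mask 1 << (i & 7).  A call such as set_bits(tab, 5, 7)
   must mark bits 5..11 and therefore takes the "else" branch above:
   it ORs 0xE0 into tab[0] (bits 5..7) and 0x0F into tab[1] (bits 8..11). */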
617 static void build_page_bitmap(PageDesc *p)
619 int n, tb_start, tb_end;
620 TranslationBlock *tb;
622 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
623 if (!p->code_bitmap)
624 return;
625 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
627 tb = p->first_tb;
628 while (tb != NULL) {
629 n = (long)tb & 3;
630 tb = (TranslationBlock *)((long)tb & ~3);
631 /* NOTE: this is subtle as a TB may span two physical pages */
632 if (n == 0) {
633 /* NOTE: tb_end may be after the end of the page, but
634 it is not a problem */
635 tb_start = tb->pc & ~TARGET_PAGE_MASK;
636 tb_end = tb_start + tb->size;
637 if (tb_end > TARGET_PAGE_SIZE)
638 tb_end = TARGET_PAGE_SIZE;
639 } else {
640 tb_start = 0;
641 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
643 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
644 tb = tb->page_next[n];
648 #ifdef TARGET_HAS_PRECISE_SMC
650 static void tb_gen_code(CPUState *env,
651 target_ulong pc, target_ulong cs_base, int flags,
652 int cflags)
654 TranslationBlock *tb;
655 uint8_t *tc_ptr;
656 target_ulong phys_pc, phys_page2, virt_page2;
657 int code_gen_size;
659 phys_pc = get_phys_addr_code(env, pc);
660 tb = tb_alloc(pc);
661 if (!tb) {
662 /* flush must be done */
663 tb_flush(env);
664 /* cannot fail at this point */
665 tb = tb_alloc(pc);
667 tc_ptr = code_gen_ptr;
668 tb->tc_ptr = tc_ptr;
669 tb->cs_base = cs_base;
670 tb->flags = flags;
671 tb->cflags = cflags;
672 cpu_gen_code(env, tb, &code_gen_size);
673 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
675 /* check next page if needed */
676 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
677 phys_page2 = -1;
678 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
679 phys_page2 = get_phys_addr_code(env, virt_page2);
681 tb_link_phys(tb, phys_pc, phys_page2);
683 #endif
685 /* invalidate all TBs which intersect with the target physical page
686 starting in range [start, end). NOTE: start and end must refer to
687 the same physical page. 'is_cpu_write_access' should be true if called
688 from a real cpu write access: the virtual CPU will exit the current
689 TB if code is modified inside this TB. */
690 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
691 int is_cpu_write_access)
693 int n, current_tb_modified, current_tb_not_found, current_flags;
694 CPUState *env = cpu_single_env;
695 PageDesc *p;
696 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
697 target_ulong tb_start, tb_end;
698 target_ulong current_pc, current_cs_base;
700 p = page_find(start >> TARGET_PAGE_BITS);
701 if (!p)
702 return;
703 if (!p->code_bitmap &&
704 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
705 is_cpu_write_access) {
706 /* build code bitmap */
707 build_page_bitmap(p);
710 /* we remove all the TBs in the range [start, end) */
711 /* XXX: see if in some cases it could be faster to invalidate all the code */
712 current_tb_not_found = is_cpu_write_access;
713 current_tb_modified = 0;
714 current_tb = NULL; /* avoid warning */
715 current_pc = 0; /* avoid warning */
716 current_cs_base = 0; /* avoid warning */
717 current_flags = 0; /* avoid warning */
718 tb = p->first_tb;
719 while (tb != NULL) {
720 n = (long)tb & 3;
721 tb = (TranslationBlock *)((long)tb & ~3);
722 tb_next = tb->page_next[n];
723 /* NOTE: this is subtle as a TB may span two physical pages */
724 if (n == 0) {
725 /* NOTE: tb_end may be after the end of the page, but
726 it is not a problem */
727 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
728 tb_end = tb_start + tb->size;
729 } else {
730 tb_start = tb->page_addr[1];
731 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
733 if (!(tb_end <= start || tb_start >= end)) {
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb_not_found) {
736 current_tb_not_found = 0;
737 current_tb = NULL;
738 if (env->mem_write_pc) {
739 /* now we have a real cpu fault */
740 current_tb = tb_find_pc(env->mem_write_pc);
743 if (current_tb == tb &&
744 !(current_tb->cflags & CF_SINGLE_INSN)) {
745 /* If we are modifying the current TB, we must stop
746 its execution. We could be more precise by checking
747 that the modification is after the current PC, but it
748 would require a specialized function to partially
749 restore the CPU state */
751 current_tb_modified = 1;
752 cpu_restore_state(current_tb, env,
753 env->mem_write_pc, NULL);
754 #if defined(TARGET_I386)
755 current_flags = env->hflags;
756 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
757 current_cs_base = (target_ulong)env->segs[R_CS].base;
758 current_pc = current_cs_base + env->eip;
759 #else
760 #error unsupported CPU
761 #endif
763 #endif /* TARGET_HAS_PRECISE_SMC */
764 /* we need to do that to handle the case where a signal
765 occurs while doing tb_phys_invalidate() */
766 saved_tb = NULL;
767 if (env) {
768 saved_tb = env->current_tb;
769 env->current_tb = NULL;
771 tb_phys_invalidate(tb, -1);
772 if (env) {
773 env->current_tb = saved_tb;
774 if (env->interrupt_request && env->current_tb)
775 cpu_interrupt(env, env->interrupt_request);
778 tb = tb_next;
780 #if !defined(CONFIG_USER_ONLY)
781 /* if no code remaining, no need to continue to use slow writes */
782 if (!p->first_tb) {
783 invalidate_page_bitmap(p);
784 if (is_cpu_write_access) {
785 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
788 #endif
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
793 itself */
794 env->current_tb = NULL;
795 tb_gen_code(env, current_pc, current_cs_base, current_flags,
796 CF_SINGLE_INSN);
797 cpu_resume_from_signal(env, NULL);
799 #endif
802 /* len must be <= 8 and start must be a multiple of len */
803 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
805 PageDesc *p;
806 int offset, b;
807 #if 0
808 if (1) {
809 if (loglevel) {
810 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
811 cpu_single_env->mem_write_vaddr, len,
812 cpu_single_env->eip,
813 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
816 #endif
817 p = page_find(start >> TARGET_PAGE_BITS);
818 if (!p)
819 return;
820 if (p->code_bitmap) {
821 offset = start & ~TARGET_PAGE_MASK;
822 b = p->code_bitmap[offset >> 3] >> (offset & 7);
823 if (b & ((1 << len) - 1))
824 goto do_invalidate;
825 } else {
826 do_invalidate:
827 tb_invalidate_phys_page_range(start, start + len, 1);
831 #if !defined(CONFIG_SOFTMMU)
832 static void tb_invalidate_phys_page(target_ulong addr,
833 unsigned long pc, void *puc)
835 int n, current_flags, current_tb_modified;
836 target_ulong current_pc, current_cs_base;
837 PageDesc *p;
838 TranslationBlock *tb, *current_tb;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 CPUState *env = cpu_single_env;
841 #endif
843 addr &= TARGET_PAGE_MASK;
844 p = page_find(addr >> TARGET_PAGE_BITS);
845 if (!p)
846 return;
847 tb = p->first_tb;
848 current_tb_modified = 0;
849 current_tb = NULL;
850 current_pc = 0; /* avoid warning */
851 current_cs_base = 0; /* avoid warning */
852 current_flags = 0; /* avoid warning */
853 #ifdef TARGET_HAS_PRECISE_SMC
854 if (tb && pc != 0) {
855 current_tb = tb_find_pc(pc);
857 #endif
858 while (tb != NULL) {
859 n = (long)tb & 3;
860 tb = (TranslationBlock *)((long)tb & ~3);
861 #ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb == tb &&
863 !(current_tb->cflags & CF_SINGLE_INSN)) {
864 /* If we are modifying the current TB, we must stop
865 its execution. We could be more precise by checking
866 that the modification is after the current PC, but it
867 would require a specialized function to partially
868 restore the CPU state */
870 current_tb_modified = 1;
871 cpu_restore_state(current_tb, env, pc, puc);
872 #if defined(TARGET_I386)
873 current_flags = env->hflags;
874 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
875 current_cs_base = (target_ulong)env->segs[R_CS].base;
876 current_pc = current_cs_base + env->eip;
877 #else
878 #error unsupported CPU
879 #endif
881 #endif /* TARGET_HAS_PRECISE_SMC */
882 tb_phys_invalidate(tb, addr);
883 tb = tb->page_next[n];
885 p->first_tb = NULL;
886 #ifdef TARGET_HAS_PRECISE_SMC
887 if (current_tb_modified) {
888 /* we generate a block containing just the instruction
889 modifying the memory. It will ensure that it cannot modify
890 itself */
891 env->current_tb = NULL;
892 tb_gen_code(env, current_pc, current_cs_base, current_flags,
893 CF_SINGLE_INSN);
894 cpu_resume_from_signal(env, puc);
896 #endif
898 #endif
900 /* add the tb in the target page and protect it if necessary */
901 static inline void tb_alloc_page(TranslationBlock *tb,
902 unsigned int n, target_ulong page_addr)
904 PageDesc *p;
905 TranslationBlock *last_first_tb;
907 tb->page_addr[n] = page_addr;
908 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
909 tb->page_next[n] = p->first_tb;
910 last_first_tb = p->first_tb;
911 p->first_tb = (TranslationBlock *)((long)tb | n);
912 invalidate_page_bitmap(p);
914 #if defined(TARGET_HAS_SMC) || 1
916 #if defined(CONFIG_USER_ONLY)
917 if (p->flags & PAGE_WRITE) {
918 target_ulong addr;
919 PageDesc *p2;
920 int prot;
922 /* force the host page as non writable (writes will have a
923 page fault + mprotect overhead) */
924 page_addr &= qemu_host_page_mask;
925 prot = 0;
926 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
927 addr += TARGET_PAGE_SIZE) {
929 p2 = page_find (addr >> TARGET_PAGE_BITS);
930 if (!p2)
931 continue;
932 prot |= p2->flags;
933 p2->flags &= ~PAGE_WRITE;
934 page_get_flags(addr);
936 mprotect(g2h(page_addr), qemu_host_page_size,
937 (prot & PAGE_BITS) & ~PAGE_WRITE);
938 #ifdef DEBUG_TB_INVALIDATE
939 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
940 page_addr);
941 #endif
943 #else
944 /* if some code is already present, then the pages are already
945 protected. So we handle the case where only the first TB is
946 allocated in a physical page */
947 if (!last_first_tb) {
948 tlb_protect_code(page_addr);
950 #endif
952 #endif /* TARGET_HAS_SMC */
955 /* Allocate a new translation block. Flush the translation buffer if
956 too many translation blocks or too much generated code. */
957 TranslationBlock *tb_alloc(target_ulong pc)
959 TranslationBlock *tb;
961 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
962 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
963 return NULL;
964 tb = &tbs[nb_tbs++];
965 tb->pc = pc;
966 tb->cflags = 0;
967 return tb;
970 /* add a new TB and link it to the physical page tables. phys_page2 is
971 (-1) to indicate that only one page contains the TB. */
972 void tb_link_phys(TranslationBlock *tb,
973 target_ulong phys_pc, target_ulong phys_page2)
975 unsigned int h;
976 TranslationBlock **ptb;
978 /* add in the physical hash table */
979 h = tb_phys_hash_func(phys_pc);
980 ptb = &tb_phys_hash[h];
981 tb->phys_hash_next = *ptb;
982 *ptb = tb;
984 /* add in the page list */
985 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
986 if (phys_page2 != -1)
987 tb_alloc_page(tb, 1, phys_page2);
988 else
989 tb->page_addr[1] = -1;
991 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
992 tb->jmp_next[0] = NULL;
993 tb->jmp_next[1] = NULL;
995 /* init original jump addresses */
996 if (tb->tb_next_offset[0] != 0xffff)
997 tb_reset_jump(tb, 0);
998 if (tb->tb_next_offset[1] != 0xffff)
999 tb_reset_jump(tb, 1);
1001 #ifdef DEBUG_TB_CHECK
1002 tb_page_check();
1003 #endif
1006 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1007 tb[1].tc_ptr. Return NULL if not found */
1008 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1010 int m_min, m_max, m;
1011 unsigned long v;
1012 TranslationBlock *tb;
1014 if (nb_tbs <= 0)
1015 return NULL;
1016 if (tc_ptr < (unsigned long)code_gen_buffer ||
1017 tc_ptr >= (unsigned long)code_gen_ptr)
1018 return NULL;
1019 /* binary search (cf Knuth) */
1020 m_min = 0;
1021 m_max = nb_tbs - 1;
1022 while (m_min <= m_max) {
1023 m = (m_min + m_max) >> 1;
1024 tb = &tbs[m];
1025 v = (unsigned long)tb->tc_ptr;
1026 if (v == tc_ptr)
1027 return tb;
1028 else if (tc_ptr < v) {
1029 m_max = m - 1;
1030 } else {
1031 m_min = m + 1;
1034 return &tbs[m_max];
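#if 0
/* Usage sketch (hypothetical helper name): this is the pattern the SMC
   and signal handling paths in this file rely on -- given a host PC that
   faulted inside generated code, find the owning TB and roll the guest
   CPU state back to the instruction that was executing. */
static void example_restore_from_host_pc(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        cpu_restore_state(tb, env, host_pc, NULL);
    }
}
#endif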
1037 static void tb_reset_jump_recursive(TranslationBlock *tb);
1039 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1041 TranslationBlock *tb1, *tb_next, **ptb;
1042 unsigned int n1;
1044 tb1 = tb->jmp_next[n];
1045 if (tb1 != NULL) {
1046 /* find head of list */
1047 for(;;) {
1048 n1 = (long)tb1 & 3;
1049 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1050 if (n1 == 2)
1051 break;
1052 tb1 = tb1->jmp_next[n1];
1054 /* we are now sure that tb jumps to tb1 */
1055 tb_next = tb1;
1057 /* remove tb from the jmp_first list */
1058 ptb = &tb_next->jmp_first;
1059 for(;;) {
1060 tb1 = *ptb;
1061 n1 = (long)tb1 & 3;
1062 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1063 if (n1 == n && tb1 == tb)
1064 break;
1065 ptb = &tb1->jmp_next[n1];
1067 *ptb = tb->jmp_next[n];
1068 tb->jmp_next[n] = NULL;
1070 /* suppress the jump to next tb in generated code */
1071 tb_reset_jump(tb, n);
1073 /* suppress jumps in the tb on which we could have jumped */
1074 tb_reset_jump_recursive(tb_next);
1078 static void tb_reset_jump_recursive(TranslationBlock *tb)
1080 tb_reset_jump_recursive2(tb, 0);
1081 tb_reset_jump_recursive2(tb, 1);
1084 #if defined(TARGET_HAS_ICE)
1085 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1087 target_phys_addr_t addr;
1088 target_ulong pd;
1089 ram_addr_t ram_addr;
1090 PhysPageDesc *p;
1092 addr = cpu_get_phys_page_debug(env, pc);
1093 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1094 if (!p) {
1095 pd = IO_MEM_UNASSIGNED;
1096 } else {
1097 pd = p->phys_offset;
1099 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1100 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1102 #endif
1104 /* Add a watchpoint. */
1105 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1107 int i;
1109 for (i = 0; i < env->nb_watchpoints; i++) {
1110 if (addr == env->watchpoint[i].vaddr)
1111 return 0;
1113 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1114 return -1;
1116 i = env->nb_watchpoints++;
1117 env->watchpoint[i].vaddr = addr;
1118 tlb_flush_page(env, addr);
1119 /* FIXME: This flush is needed because of the hack to make memory ops
1120 terminate the TB. It can be removed once the proper IO trap and
1121 re-execute bits are in. */
1122 tb_flush(env);
1123 return i;
1126 /* Remove a watchpoint. */
1127 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1129 int i;
1131 for (i = 0; i < env->nb_watchpoints; i++) {
1132 if (addr == env->watchpoint[i].vaddr) {
1133 env->nb_watchpoints--;
1134 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1135 tlb_flush_page(env, addr);
1136 return 0;
1139 return -1;
1142 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1143 breakpoint is reached */
1144 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1146 #if defined(TARGET_HAS_ICE)
1147 int i;
1149 for(i = 0; i < env->nb_breakpoints; i++) {
1150 if (env->breakpoints[i] == pc)
1151 return 0;
1154 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1155 return -1;
1156 env->breakpoints[env->nb_breakpoints++] = pc;
1158 if (kvm_enabled())
1159 kvm_update_debugger(env);
1161 breakpoint_invalidate(env, pc);
1162 return 0;
1163 #else
1164 return -1;
1165 #endif
1168 /* remove a breakpoint */
1169 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1171 #if defined(TARGET_HAS_ICE)
1172 int i;
1173 for(i = 0; i < env->nb_breakpoints; i++) {
1174 if (env->breakpoints[i] == pc)
1175 goto found;
1177 return -1;
1178 found:
1179 env->nb_breakpoints--;
1180 if (i < env->nb_breakpoints)
1181 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1183 if (kvm_enabled())
1184 kvm_update_debugger(env);
1186 breakpoint_invalidate(env, pc);
1187 return 0;
1188 #else
1189 return -1;
1190 #endif
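#if 0
/* Usage sketch (illustrative helper name): how a debugger front end such
   as the gdb stub is expected to drive this API; the CPU loop then
   reports EXCP_DEBUG when the breakpoint is hit. */
static void example_toggle_breakpoint(CPUState *env, target_ulong pc, int on)
{
    if (on)
        cpu_breakpoint_insert(env, pc);
    else
        cpu_breakpoint_remove(env, pc);
}
#endif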
1193 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1194 CPU loop after each instruction */
1195 void cpu_single_step(CPUState *env, int enabled)
1197 #if defined(TARGET_HAS_ICE)
1198 if (env->singlestep_enabled != enabled) {
1199 env->singlestep_enabled = enabled;
1200 /* must flush all the translated code to avoid inconsistencies */
1201 /* XXX: only flush what is necessary */
1202 tb_flush(env);
1204 if (kvm_enabled())
1205 kvm_update_debugger(env);
1206 #endif
1209 /* enable or disable low-level logging */
1210 void cpu_set_log(int log_flags)
1212 loglevel = log_flags;
1213 if (loglevel && !logfile) {
1214 logfile = fopen(logfilename, log_append ? "a" : "w");
1215 if (!logfile) {
1216 perror(logfilename);
1217 _exit(1);
1219 #if !defined(CONFIG_SOFTMMU)
1220 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1222 static uint8_t logfile_buf[4096];
1223 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1225 #else
1226 setvbuf(logfile, NULL, _IOLBF, 0);
1227 #endif
1228 log_append = 1;
1230 if (!loglevel && logfile) {
1231 fclose(logfile);
1232 logfile = NULL;
1236 void cpu_set_log_filename(const char *filename)
1238 logfilename = strdup(filename);
1239 if (logfile) {
1240 fclose(logfile);
1241 logfile = NULL;
1243 cpu_set_log(loglevel);
1246 /* mask must never be zero, except for A20 change call */
1247 void cpu_interrupt(CPUState *env, int mask)
1249 TranslationBlock *tb;
1250 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1252 env->interrupt_request |= mask;
1253 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1254 kvm_update_interrupt_request(env);
1256 /* if the cpu is currently executing code, we must unlink it and
1257 all the potentially executing TB */
1258 tb = env->current_tb;
1259 if (tb && !testandset(&interrupt_lock)) {
1260 env->current_tb = NULL;
1261 tb_reset_jump_recursive(tb);
1262 resetlock(&interrupt_lock);
1266 void cpu_reset_interrupt(CPUState *env, int mask)
1268 env->interrupt_request &= ~mask;
1271 CPULogItem cpu_log_items[] = {
1272 { CPU_LOG_TB_OUT_ASM, "out_asm",
1273 "show generated host assembly code for each compiled TB" },
1274 { CPU_LOG_TB_IN_ASM, "in_asm",
1275 "show target assembly code for each compiled TB" },
1276 { CPU_LOG_TB_OP, "op",
1277 "show micro ops for each compiled TB" },
1278 { CPU_LOG_TB_OP_OPT, "op_opt",
1279 "show micro ops "
1280 #ifdef TARGET_I386
1281 "before eflags optimization and "
1282 #endif
1283 "after liveness analysis" },
1284 { CPU_LOG_INT, "int",
1285 "show interrupts/exceptions in short format" },
1286 { CPU_LOG_EXEC, "exec",
1287 "show trace before each executed TB (lots of logs)" },
1288 { CPU_LOG_TB_CPU, "cpu",
1289 "show CPU state before block translation" },
1290 #ifdef TARGET_I386
1291 { CPU_LOG_PCALL, "pcall",
1292 "show protected mode far calls/returns/exceptions" },
1293 #endif
1294 #ifdef DEBUG_IOPORT
1295 { CPU_LOG_IOPORT, "ioport",
1296 "show all i/o ports accesses" },
1297 #endif
1298 { 0, NULL, NULL },
1301 static int cmp1(const char *s1, int n, const char *s2)
1303 if (strlen(s2) != n)
1304 return 0;
1305 return memcmp(s1, s2, n) == 0;
1308 /* takes a comma separated list of log masks. Return 0 if error. */
1309 int cpu_str_to_log_mask(const char *str)
1311 CPULogItem *item;
1312 int mask;
1313 const char *p, *p1;
1315 p = str;
1316 mask = 0;
1317 for(;;) {
1318 p1 = strchr(p, ',');
1319 if (!p1)
1320 p1 = p + strlen(p);
1321 if(cmp1(p,p1-p,"all")) {
1322 for(item = cpu_log_items; item->mask != 0; item++) {
1323 mask |= item->mask;
1325 } else {
1326 for(item = cpu_log_items; item->mask != 0; item++) {
1327 if (cmp1(p, p1 - p, item->name))
1328 goto found;
1330 return 0;
1332 found:
1333 mask |= item->mask;
1334 if (*p1 != ',')
1335 break;
1336 p = p1 + 1;
1338 return mask;
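#if 0
/* Usage sketch (roughly what the -d command line handling does; the
   helper name is illustrative): cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
   cpu_log_items[], and 0 is returned for an unknown name. */
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);
    if (mask)
        cpu_set_log(mask);
}
#endif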
1341 void cpu_abort(CPUState *env, const char *fmt, ...)
1343 va_list ap;
1344 va_list ap2;
1346 va_start(ap, fmt);
1347 va_copy(ap2, ap);
1348 fprintf(stderr, "qemu: fatal: ");
1349 vfprintf(stderr, fmt, ap);
1350 fprintf(stderr, "\n");
1351 #ifdef TARGET_I386
1352 if(env->intercept & INTERCEPT_SVM_MASK) {
1353 /* most probably the virtual machine should not
1354 be shut down but rather caught by the VMM */
1355 vmexit(SVM_EXIT_SHUTDOWN, 0);
1357 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1358 #else
1359 cpu_dump_state(env, stderr, fprintf, 0);
1360 #endif
1361 if (logfile) {
1362 fprintf(logfile, "qemu: fatal: ");
1363 vfprintf(logfile, fmt, ap2);
1364 fprintf(logfile, "\n");
1365 #ifdef TARGET_I386
1366 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1367 #else
1368 cpu_dump_state(env, logfile, fprintf, 0);
1369 #endif
1370 fflush(logfile);
1371 fclose(logfile);
1373 va_end(ap2);
1374 va_end(ap);
1375 abort();
1378 CPUState *cpu_copy(CPUState *env)
1380 CPUState *new_env = cpu_init(env->cpu_model_str);
1381 /* preserve chaining and index */
1382 CPUState *next_cpu = new_env->next_cpu;
1383 int cpu_index = new_env->cpu_index;
1384 memcpy(new_env, env, sizeof(CPUState));
1385 new_env->next_cpu = next_cpu;
1386 new_env->cpu_index = cpu_index;
1387 return new_env;
1390 #if !defined(CONFIG_USER_ONLY)
1392 /* NOTE: if flush_global is true, also flush global entries (not
1393 implemented yet) */
1394 void tlb_flush(CPUState *env, int flush_global)
1396 int i;
1398 #if defined(DEBUG_TLB)
1399 printf("tlb_flush:\n");
1400 #endif
1401 /* must reset current TB so that interrupts cannot modify the
1402 links while we are modifying them */
1403 env->current_tb = NULL;
1405 for(i = 0; i < CPU_TLB_SIZE; i++) {
1406 env->tlb_table[0][i].addr_read = -1;
1407 env->tlb_table[0][i].addr_write = -1;
1408 env->tlb_table[0][i].addr_code = -1;
1409 env->tlb_table[1][i].addr_read = -1;
1410 env->tlb_table[1][i].addr_write = -1;
1411 env->tlb_table[1][i].addr_code = -1;
1412 #if (NB_MMU_MODES >= 3)
1413 env->tlb_table[2][i].addr_read = -1;
1414 env->tlb_table[2][i].addr_write = -1;
1415 env->tlb_table[2][i].addr_code = -1;
1416 #if (NB_MMU_MODES == 4)
1417 env->tlb_table[3][i].addr_read = -1;
1418 env->tlb_table[3][i].addr_write = -1;
1419 env->tlb_table[3][i].addr_code = -1;
1420 #endif
1421 #endif
1424 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1426 #if !defined(CONFIG_SOFTMMU)
1427 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1428 #endif
1429 #ifdef USE_KQEMU
1430 if (env->kqemu_enabled) {
1431 kqemu_flush(env, flush_global);
1433 #endif
1434 tlb_flush_count++;
1437 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1439 if (addr == (tlb_entry->addr_read &
1440 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1441 addr == (tlb_entry->addr_write &
1442 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1443 addr == (tlb_entry->addr_code &
1444 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1445 tlb_entry->addr_read = -1;
1446 tlb_entry->addr_write = -1;
1447 tlb_entry->addr_code = -1;
1451 void tlb_flush_page(CPUState *env, target_ulong addr)
1453 int i;
1454 TranslationBlock *tb;
1456 #if defined(DEBUG_TLB)
1457 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1458 #endif
1459 /* must reset current TB so that interrupts cannot modify the
1460 links while we are modifying them */
1461 env->current_tb = NULL;
1463 addr &= TARGET_PAGE_MASK;
1464 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1465 tlb_flush_entry(&env->tlb_table[0][i], addr);
1466 tlb_flush_entry(&env->tlb_table[1][i], addr);
1467 #if (NB_MMU_MODES >= 3)
1468 tlb_flush_entry(&env->tlb_table[2][i], addr);
1469 #if (NB_MMU_MODES == 4)
1470 tlb_flush_entry(&env->tlb_table[3][i], addr);
1471 #endif
1472 #endif
1474 /* Discard jump cache entries for any tb which might potentially
1475 overlap the flushed page. */
1476 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1477 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1479 i = tb_jmp_cache_hash_page(addr);
1480 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1482 #if !defined(CONFIG_SOFTMMU)
1483 if (addr < MMAP_AREA_END)
1484 munmap((void *)addr, TARGET_PAGE_SIZE);
1485 #endif
1486 #ifdef USE_KQEMU
1487 if (env->kqemu_enabled) {
1488 kqemu_flush_page(env, addr);
1490 #endif
1493 /* update the TLBs so that writes to code in the virtual page 'addr'
1494 can be detected */
1495 static void tlb_protect_code(ram_addr_t ram_addr)
1497 cpu_physical_memory_reset_dirty(ram_addr,
1498 ram_addr + TARGET_PAGE_SIZE,
1499 CODE_DIRTY_FLAG);
1502 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1503 tested for self modifying code */
1504 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1505 target_ulong vaddr)
1507 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1510 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1511 unsigned long start, unsigned long length)
1513 unsigned long addr;
1514 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1515 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1516 if ((addr - start) < length) {
1517 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1522 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1523 int dirty_flags)
1525 CPUState *env;
1526 unsigned long length, start1;
1527 int i, mask, len;
1528 uint8_t *p;
1530 start &= TARGET_PAGE_MASK;
1531 end = TARGET_PAGE_ALIGN(end);
1533 length = end - start;
1534 if (length == 0)
1535 return;
1536 len = length >> TARGET_PAGE_BITS;
1537 #ifdef USE_KQEMU
1538 /* XXX: should not depend on cpu context */
1539 env = first_cpu;
1540 if (env->kqemu_enabled) {
1541 ram_addr_t addr;
1542 addr = start;
1543 for(i = 0; i < len; i++) {
1544 kqemu_set_notdirty(env, addr);
1545 addr += TARGET_PAGE_SIZE;
1548 #endif
1549 mask = ~dirty_flags;
1550 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1551 for(i = 0; i < len; i++)
1552 p[i] &= mask;
1554 /* we modify the TLB cache so that the dirty bit will be set again
1555 when accessing the range */
1556 start1 = start + (unsigned long)phys_ram_base;
1557 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1558 for(i = 0; i < CPU_TLB_SIZE; i++)
1559 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1562 #if (NB_MMU_MODES >= 3)
1563 for(i = 0; i < CPU_TLB_SIZE; i++)
1564 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1565 #if (NB_MMU_MODES == 4)
1566 for(i = 0; i < CPU_TLB_SIZE; i++)
1567 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1568 #endif
1569 #endif
1572 #if !defined(CONFIG_SOFTMMU)
1573 /* XXX: this is expensive */
1575 VirtPageDesc *p;
1576 int j;
1577 target_ulong addr;
1579 for(i = 0; i < L1_SIZE; i++) {
1580 p = l1_virt_map[i];
1581 if (p) {
1582 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1583 for(j = 0; j < L2_SIZE; j++) {
1584 if (p->valid_tag == virt_valid_tag &&
1585 p->phys_addr >= start && p->phys_addr < end &&
1586 (p->prot & PROT_WRITE)) {
1587 if (addr < MMAP_AREA_END) {
1588 mprotect((void *)addr, TARGET_PAGE_SIZE,
1589 p->prot & ~PROT_WRITE);
1592 addr += TARGET_PAGE_SIZE;
1593 p++;
1598 #endif
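/* Illustration of the dirty tracking round trip implemented above: each
   RAM page has a byte of dirty flags in phys_ram_dirty[].  This function
   clears the requested flag bits and, via tlb_reset_dirty_range(),
   rewrites matching TLB write entries to IO_MEM_NOTDIRTY, so the next
   guest store to such a page goes through the not-dirty slow path
   (notdirty_mem_write*() below), which invalidates any translated code
   on the page, marks it dirty again and lets tlb_set_dirty() switch the
   entry back to plain RAM. */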
1601 int cpu_physical_memory_set_dirty_tracking(int enable)
1603 int r=0;
1605 if (kvm_enabled())
1606 r = kvm_physical_memory_set_dirty_tracking(enable);
1607 in_migration = enable;
1608 return r;
1611 int cpu_physical_memory_get_dirty_tracking(void)
1613 return in_migration;
1616 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1618 ram_addr_t ram_addr;
1620 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1621 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1622 tlb_entry->addend - (unsigned long)phys_ram_base;
1623 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1624 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1629 /* update the TLB according to the current state of the dirty bits */
1630 void cpu_tlb_update_dirty(CPUState *env)
1632 int i;
1633 for(i = 0; i < CPU_TLB_SIZE; i++)
1634 tlb_update_dirty(&env->tlb_table[0][i]);
1635 for(i = 0; i < CPU_TLB_SIZE; i++)
1636 tlb_update_dirty(&env->tlb_table[1][i]);
1637 #if (NB_MMU_MODES >= 3)
1638 for(i = 0; i < CPU_TLB_SIZE; i++)
1639 tlb_update_dirty(&env->tlb_table[2][i]);
1640 #if (NB_MMU_MODES == 4)
1641 for(i = 0; i < CPU_TLB_SIZE; i++)
1642 tlb_update_dirty(&env->tlb_table[3][i]);
1643 #endif
1644 #endif
1647 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1648 unsigned long start)
1650 unsigned long addr;
1651 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1652 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1653 if (addr == start) {
1654 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1659 /* update the TLB corresponding to virtual page vaddr and phys addr
1660 addr so that it is no longer dirty */
1661 static inline void tlb_set_dirty(CPUState *env,
1662 unsigned long addr, target_ulong vaddr)
1664 int i;
1666 addr &= TARGET_PAGE_MASK;
1667 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1668 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1669 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1670 #if (NB_MMU_MODES >= 3)
1671 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1672 #if (NB_MMU_MODES == 4)
1673 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1674 #endif
1675 #endif
1678 /* add a new TLB entry. At most one entry for a given virtual address
1679 is permitted. Return 0 if OK or 2 if the page could not be mapped
1680 (can only happen in non SOFTMMU mode for I/O pages or pages
1681 conflicting with the host address space). */
1682 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1683 target_phys_addr_t paddr, int prot,
1684 int mmu_idx, int is_softmmu)
1686 PhysPageDesc *p;
1687 unsigned long pd;
1688 unsigned int index;
1689 target_ulong address;
1690 target_phys_addr_t addend;
1691 int ret;
1692 CPUTLBEntry *te;
1693 int i;
1695 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1696 if (!p) {
1697 pd = IO_MEM_UNASSIGNED;
1698 } else {
1699 pd = p->phys_offset;
1701 #if defined(DEBUG_TLB)
1702 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1703 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1704 #endif
1706 ret = 0;
1707 #if !defined(CONFIG_SOFTMMU)
1708 if (is_softmmu)
1709 #endif
1711 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1712 /* IO memory case */
1713 address = vaddr | pd;
1714 addend = paddr;
1715 } else {
1716 /* standard memory */
1717 address = vaddr;
1718 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1721 /* Make accesses to pages with watchpoints go via the
1722 watchpoint trap routines. */
1723 for (i = 0; i < env->nb_watchpoints; i++) {
1724 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1725 if (address & ~TARGET_PAGE_MASK) {
1726 env->watchpoint[i].addend = 0;
1727 address = vaddr | io_mem_watch;
1728 } else {
1729 env->watchpoint[i].addend = pd - paddr +
1730 (unsigned long) phys_ram_base;
1731 /* TODO: Figure out how to make read watchpoints coexist
1732 with code. */
1733 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1738 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1739 addend -= vaddr;
1740 te = &env->tlb_table[mmu_idx][index];
1741 te->addend = addend;
1742 if (prot & PAGE_READ) {
1743 te->addr_read = address;
1744 } else {
1745 te->addr_read = -1;
1747 if (prot & PAGE_EXEC) {
1748 te->addr_code = address;
1749 } else {
1750 te->addr_code = -1;
1752 if (prot & PAGE_WRITE) {
1753 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1754 (pd & IO_MEM_ROMD)) {
1755 /* write access calls the I/O callback */
1756 te->addr_write = vaddr |
1757 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1758 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1759 !cpu_physical_memory_is_dirty(pd)) {
1760 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1761 } else {
1762 te->addr_write = address;
1764 } else {
1765 te->addr_write = -1;
1768 #if !defined(CONFIG_SOFTMMU)
1769 else {
1770 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1771 /* IO access: no mapping is done as it will be handled by the
1772 soft MMU */
1773 if (!(env->hflags & HF_SOFTMMU_MASK))
1774 ret = 2;
1775 } else {
1776 void *map_addr;
1778 if (vaddr >= MMAP_AREA_END) {
1779 ret = 2;
1780 } else {
1781 if (prot & PROT_WRITE) {
1782 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1783 #if defined(TARGET_HAS_SMC) || 1
1784 first_tb ||
1785 #endif
1786 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1787 !cpu_physical_memory_is_dirty(pd))) {
1788 /* ROM: we behave as if code were inside */
1789 /* if code is present, we only map it read-only and save the
1790 original mapping */
1791 VirtPageDesc *vp;
1793 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1794 vp->phys_addr = pd;
1795 vp->prot = prot;
1796 vp->valid_tag = virt_valid_tag;
1797 prot &= ~PAGE_WRITE;
1800 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1801 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1802 if (map_addr == MAP_FAILED) {
1803 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1804 paddr, vaddr);
1809 #endif
1810 return ret;
1813 /* called from signal handler: invalidate the code and unprotect the
1814 page. Return TRUE if the fault was successfully handled. */
1815 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1817 #if !defined(CONFIG_SOFTMMU)
1818 VirtPageDesc *vp;
1820 #if defined(DEBUG_TLB)
1821 printf("page_unprotect: addr=0x%08x\n", addr);
1822 #endif
1823 addr &= TARGET_PAGE_MASK;
1825 /* if it is not mapped, no need to worry here */
1826 if (addr >= MMAP_AREA_END)
1827 return 0;
1828 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1829 if (!vp)
1830 return 0;
1831 /* NOTE: in this case, validate_tag is _not_ tested as it
1832 validates only the code TLB */
1833 if (vp->valid_tag != virt_valid_tag)
1834 return 0;
1835 if (!(vp->prot & PAGE_WRITE))
1836 return 0;
1837 #if defined(DEBUG_TLB)
1838 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1839 addr, vp->phys_addr, vp->prot);
1840 #endif
1841 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1842 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1843 (unsigned long)addr, vp->prot);
1844 /* set the dirty bit */
1845 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1846 /* flush the code inside */
1847 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1848 return 1;
1849 #else
1850 return 0;
1851 #endif
1854 #else
1856 void tlb_flush(CPUState *env, int flush_global)
1860 void tlb_flush_page(CPUState *env, target_ulong addr)
1864 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1865 target_phys_addr_t paddr, int prot,
1866 int mmu_idx, int is_softmmu)
1868 return 0;
1871 /* dump memory mappings */
1872 void page_dump(FILE *f)
1874 unsigned long start, end;
1875 int i, j, prot, prot1;
1876 PageDesc *p;
1878 fprintf(f, "%-8s %-8s %-8s %s\n",
1879 "start", "end", "size", "prot");
1880 start = -1;
1881 end = -1;
1882 prot = 0;
1883 for(i = 0; i <= L1_SIZE; i++) {
1884 if (i < L1_SIZE)
1885 p = l1_map[i];
1886 else
1887 p = NULL;
1888 for(j = 0;j < L2_SIZE; j++) {
1889 if (!p)
1890 prot1 = 0;
1891 else
1892 prot1 = p[j].flags;
1893 if (prot1 != prot) {
1894 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1895 if (start != -1) {
1896 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1897 start, end, end - start,
1898 prot & PAGE_READ ? 'r' : '-',
1899 prot & PAGE_WRITE ? 'w' : '-',
1900 prot & PAGE_EXEC ? 'x' : '-');
1902 if (prot1 != 0)
1903 start = end;
1904 else
1905 start = -1;
1906 prot = prot1;
1908 if (!p)
1909 break;
1914 int page_get_flags(target_ulong address)
1916 PageDesc *p;
1918 p = page_find(address >> TARGET_PAGE_BITS);
1919 if (!p)
1920 return 0;
1921 return p->flags;
1924 /* modify the flags of a page and invalidate the code if
1925 necessary. The flag PAGE_WRITE_ORG is set automatically
1926 depending on PAGE_WRITE */
1927 void page_set_flags(target_ulong start, target_ulong end, int flags)
1929 PageDesc *p;
1930 target_ulong addr;
1932 start = start & TARGET_PAGE_MASK;
1933 end = TARGET_PAGE_ALIGN(end);
1934 if (flags & PAGE_WRITE)
1935 flags |= PAGE_WRITE_ORG;
1936 spin_lock(&tb_lock);
1937 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1938 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1939 /* if the write protection is set, then we invalidate the code
1940 inside */
1941 if (!(p->flags & PAGE_WRITE) &&
1942 (flags & PAGE_WRITE) &&
1943 p->first_tb) {
1944 tb_invalidate_phys_page(addr, 0, NULL);
1946 p->flags = flags;
1948 spin_unlock(&tb_lock);
1951 int page_check_range(target_ulong start, target_ulong len, int flags)
1953 PageDesc *p;
1954 target_ulong end;
1955 target_ulong addr;
1957 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1958 start = start & TARGET_PAGE_MASK;
1960 if( end < start )
1961 /* we've wrapped around */
1962 return -1;
1963 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1964 p = page_find(addr >> TARGET_PAGE_BITS);
1965 if( !p )
1966 return -1;
1967 if( !(p->flags & PAGE_VALID) )
1968 return -1;
1970 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1971 return -1;
1972 if (flags & PAGE_WRITE) {
1973 if (!(p->flags & PAGE_WRITE_ORG))
1974 return -1;
1975 /* unprotect the page if it was put read-only because it
1976 contains translated code */
1977 if (!(p->flags & PAGE_WRITE)) {
1978 if (!page_unprotect(addr, 0, NULL))
1979 return -1;
1981 return 0;
1984 return 0;
1987 /* called from signal handler: invalidate the code and unprotect the
1988 page. Return TRUE if the fault was successfully handled. */
1989 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1991 unsigned int page_index, prot, pindex;
1992 PageDesc *p, *p1;
1993 target_ulong host_start, host_end, addr;
1995 host_start = address & qemu_host_page_mask;
1996 page_index = host_start >> TARGET_PAGE_BITS;
1997 p1 = page_find(page_index);
1998 if (!p1)
1999 return 0;
2000 host_end = host_start + qemu_host_page_size;
2001 p = p1;
2002 prot = 0;
2003 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2004 prot |= p->flags;
2005 p++;
2007 /* if the page was really writable, then we change its
2008 protection back to writable */
2009 if (prot & PAGE_WRITE_ORG) {
2010 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2011 if (!(p1[pindex].flags & PAGE_WRITE)) {
2012 mprotect((void *)g2h(host_start), qemu_host_page_size,
2013 (prot & PAGE_BITS) | PAGE_WRITE);
2014 p1[pindex].flags |= PAGE_WRITE;
2015 /* and since the content will be modified, we must invalidate
2016 the corresponding translated code. */
2017 tb_invalidate_phys_page(address, pc, puc);
2018 #ifdef DEBUG_TB_CHECK
2019 tb_invalidate_check(address);
2020 #endif
2021 return 1;
2024 return 0;
2027 static inline void tlb_set_dirty(CPUState *env,
2028 unsigned long addr, target_ulong vaddr)
2031 #endif /* defined(CONFIG_USER_ONLY) */
2033 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2034 int memory);
2035 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2036 int orig_memory);
2037 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2038 need_subpage) \
2039 do { \
2040 if (addr > start_addr) \
2041 start_addr2 = 0; \
2042 else { \
2043 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2044 if (start_addr2 > 0) \
2045 need_subpage = 1; \
2048 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2049 end_addr2 = TARGET_PAGE_SIZE - 1; \
2050 else { \
2051 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2052 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2053 need_subpage = 1; \
2055 } while (0)
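/* Worked example for CHECK_SUBPAGE (assuming a 4 KiB target page):
   registering a region that starts 0x100 bytes into a page with
   orig_size = 0x200 gives, on the first loop iteration,
   start_addr2 = 0x100 and end_addr2 = 0x2ff (inclusive offsets within
   the page) and sets need_subpage, because the region covers neither
   the start nor the end of the page; cpu_register_physical_memory()
   below then routes it through subpage_init()/subpage_register()
   instead of installing a whole-page mapping. */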
2057 /* register physical memory. 'size' must be a multiple of the target
2058 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2059 io memory page */
2060 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2061 unsigned long size,
2062 unsigned long phys_offset)
2064 target_phys_addr_t addr, end_addr;
2065 PhysPageDesc *p;
2066 CPUState *env;
2067 unsigned long orig_size = size;
2068 void *subpage;
2070 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2071 end_addr = start_addr + (target_phys_addr_t)size;
2072 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2073 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2074 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2075 unsigned long orig_memory = p->phys_offset;
2076 target_phys_addr_t start_addr2, end_addr2;
2077 int need_subpage = 0;
2079 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2080 need_subpage);
2081 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2082 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2083 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2084 &p->phys_offset, orig_memory);
2085 } else {
2086 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2087 >> IO_MEM_SHIFT];
2089 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2090 } else {
2091 p->phys_offset = phys_offset;
2092 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2093 (phys_offset & IO_MEM_ROMD))
2094 phys_offset += TARGET_PAGE_SIZE;
2096 } else {
2097 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2098 p->phys_offset = phys_offset;
2099 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2100 (phys_offset & IO_MEM_ROMD))
2101 phys_offset += TARGET_PAGE_SIZE;
2102 else {
2103 target_phys_addr_t start_addr2, end_addr2;
2104 int need_subpage = 0;
2106 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2107 end_addr2, need_subpage);
2109 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2110 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2111 &p->phys_offset, IO_MEM_UNASSIGNED);
2112 subpage_register(subpage, start_addr2, end_addr2,
2113 phys_offset);
2119 /* since each CPU stores ram addresses in its TLB cache, we must
2120 reset the modified entries */
2121 /* XXX: slow ! */
2122 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2123 tlb_flush(env, 1);
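/* Illustrative usage (editorial; addresses and sizes are made up): board
   initialization code typically allocates RAM and then maps it, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(0x800000);       // 8 MB of RAM
       cpu_register_physical_memory(0x00000000, 0x800000,
                                    ram_off | IO_MEM_RAM);
       cpu_register_physical_memory(0xfff00000, 0x100000,
                                    bios_off | IO_MEM_ROM); // bios_off: another
                                                            // qemu_ram_alloc()

   As the comment above says, a non-zero (phys_offset & ~TARGET_PAGE_MASK)
   marks the range as I/O (or ROM) rather than plain RAM. */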
2127 /* XXX: temporary until new memory mapping API */
2128 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2130 PhysPageDesc *p;
2132 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2133 if (!p)
2134 return IO_MEM_UNASSIGNED;
2135 return p->phys_offset;
2138 /* XXX: better than nothing */
2139 ram_addr_t qemu_ram_alloc(unsigned long size)
2141 ram_addr_t addr;
2142 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2143 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %llu)\n",
2144 size, (unsigned long long)phys_ram_size);
2145 abort();
2147 addr = phys_ram_alloc_offset;
2148 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2149 return addr;
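/* Editorial note: qemu_ram_alloc() is a simple bump allocator over the
   preallocated phys_ram_base area: it returns the current offset and then
   advances phys_ram_alloc_offset to the next target page boundary.  The
   returned ram_addr_t is what callers combine with IO_MEM_* flags and pass
   to cpu_register_physical_memory() above. */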
2152 void qemu_ram_free(ram_addr_t addr)
2156 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2158 #ifdef DEBUG_UNASSIGNED
2159 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2160 #endif
2161 #ifdef TARGET_SPARC
2162 do_unassigned_access(addr, 0, 0, 0);
2163 #elif TARGET_CRIS
2164 do_unassigned_access(addr, 0, 0, 0);
2165 #endif
2166 return 0;
2169 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2171 #ifdef DEBUG_UNASSIGNED
2172 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2173 #endif
2174 #ifdef TARGET_SPARC
2175 do_unassigned_access(addr, 1, 0, 0);
2176 #elif TARGET_CRIS
2177 do_unassigned_access(addr, 1, 0, 0);
2178 #endif
2181 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2182 unassigned_mem_readb,
2183 unassigned_mem_readb,
2184 unassigned_mem_readb,
2187 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2188 unassigned_mem_writeb,
2189 unassigned_mem_writeb,
2190 unassigned_mem_writeb,
2193 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2195 unsigned long ram_addr;
2196 int dirty_flags;
2197 ram_addr = addr - (unsigned long)phys_ram_base;
2198 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2199 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2200 #if !defined(CONFIG_USER_ONLY)
2201 tb_invalidate_phys_page_fast(ram_addr, 1);
2202 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2203 #endif
2205 stb_p((uint8_t *)(long)addr, val);
2206 #ifdef USE_KQEMU
2207 if (cpu_single_env->kqemu_enabled &&
2208 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2209 kqemu_modify_page(cpu_single_env, ram_addr);
2210 #endif
2211 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2212 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2213 /* we remove the notdirty callback only if the code has been
2214 flushed */
2215 if (dirty_flags == 0xff)
2216 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2219 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2221 unsigned long ram_addr;
2222 int dirty_flags;
2223 ram_addr = addr - (unsigned long)phys_ram_base;
2224 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2226 #if !defined(CONFIG_USER_ONLY)
2227 tb_invalidate_phys_page_fast(ram_addr, 2);
2228 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2229 #endif
2231 stw_p((uint8_t *)(long)addr, val);
2232 #ifdef USE_KQEMU
2233 if (cpu_single_env->kqemu_enabled &&
2234 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2235 kqemu_modify_page(cpu_single_env, ram_addr);
2236 #endif
2237 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2238 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2239 /* we remove the notdirty callback only if the code has been
2240 flushed */
2241 if (dirty_flags == 0xff)
2242 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2245 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2247 unsigned long ram_addr;
2248 int dirty_flags;
2249 ram_addr = addr - (unsigned long)phys_ram_base;
2250 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2251 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2252 #if !defined(CONFIG_USER_ONLY)
2253 tb_invalidate_phys_page_fast(ram_addr, 4);
2254 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2255 #endif
2257 stl_p((uint8_t *)(long)addr, val);
2258 #ifdef USE_KQEMU
2259 if (cpu_single_env->kqemu_enabled &&
2260 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2261 kqemu_modify_page(cpu_single_env, ram_addr);
2262 #endif
2263 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2264 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2265 /* we remove the notdirty callback only if the code has been
2266 flushed */
2267 if (dirty_flags == 0xff)
2268 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
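/* Editorial summary of the notdirty path above: RAM pages whose dirty flags
   are not all set (typically because they still contain translated code) are
   entered in the TLB through the IO_MEM_NOTDIRTY slot, so every store lands
   here.  The handler invalidates any TBs covering the written bytes, performs
   the store on host RAM, sets all dirty bits except CODE_DIRTY_FLAG, and once
   the flags reach 0xff switches the page back to a plain RAM TLB entry via
   tlb_set_dirty(). */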
2271 static CPUReadMemoryFunc *error_mem_read[3] = {
2272 NULL, /* never used */
2273 NULL, /* never used */
2274 NULL, /* never used */
2277 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2278 notdirty_mem_writeb,
2279 notdirty_mem_writew,
2280 notdirty_mem_writel,
2283 #if defined(CONFIG_SOFTMMU)
2284 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2285 so these check for a hit then pass through to the normal out-of-line
2286 phys routines. */
2287 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2289 return ldub_phys(addr);
2292 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2294 return lduw_phys(addr);
2297 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2299 return ldl_phys(addr);
2302 /* Generate a debug exception if a watchpoint has been hit.
2303 Returns the real physical address of the access. addr will be a host
2304 address in case of a RAM location. */
2305 static target_ulong check_watchpoint(target_phys_addr_t addr)
2307 CPUState *env = cpu_single_env;
2308 target_ulong watch;
2309 target_ulong retaddr;
2310 int i;
2312 retaddr = addr;
2313 for (i = 0; i < env->nb_watchpoints; i++) {
2314 watch = env->watchpoint[i].vaddr;
2315 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2316 retaddr = addr - env->watchpoint[i].addend;
2317 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2318 cpu_single_env->watchpoint_hit = i + 1;
2319 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2320 break;
2324 return retaddr;
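/* Editorial note: check_watchpoint() is shared by the watch_mem_read and
   watch_mem_write handlers in this block.  It translates the incoming
   (possibly host) address back to the real physical address using the
   watchpoint addend, and raises CPU_INTERRUPT_DEBUG when the in-page offset
   matches a watchpoint exactly; the access then simply falls through to the
   normal _phys helpers. */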
2327 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2328 uint32_t val)
2330 addr = check_watchpoint(addr);
2331 stb_phys(addr, val);
2334 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2335 uint32_t val)
2337 addr = check_watchpoint(addr);
2338 stw_phys(addr, val);
2341 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2342 uint32_t val)
2344 addr = check_watchpoint(addr);
2345 stl_phys(addr, val);
2348 static CPUReadMemoryFunc *watch_mem_read[3] = {
2349 watch_mem_readb,
2350 watch_mem_readw,
2351 watch_mem_readl,
2354 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2355 watch_mem_writeb,
2356 watch_mem_writew,
2357 watch_mem_writel,
2359 #endif
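/* Editorial note on the subpage machinery that follows: a subpage_t splits a
   single target page into SUBPAGE_IDX-addressable chunks, each with its own
   io_mem handler and opaque pointer, so that MMIO regions smaller than a page
   (or a mix of RAM-backed and MMIO ranges) can coexist within one physical
   page.  subpage_readlen()/subpage_writelen() look up the per-chunk handler
   for the requested width (len 0/1/2 = byte/word/long) and forward the
   access. */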
2361 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2362 unsigned int len)
2364 uint32_t ret;
2365 unsigned int idx;
2367 idx = SUBPAGE_IDX(addr - mmio->base);
2368 #if defined(DEBUG_SUBPAGE)
2369 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2370 mmio, len, addr, idx);
2371 #endif
2372 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2374 return ret;
2377 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2378 uint32_t value, unsigned int len)
2380 unsigned int idx;
2382 idx = SUBPAGE_IDX(addr - mmio->base);
2383 #if defined(DEBUG_SUBPAGE)
2384 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2385 mmio, len, addr, idx, value);
2386 #endif
2387 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2390 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2392 #if defined(DEBUG_SUBPAGE)
2393 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2394 #endif
2396 return subpage_readlen(opaque, addr, 0);
2399 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2400 uint32_t value)
2402 #if defined(DEBUG_SUBPAGE)
2403 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2404 #endif
2405 subpage_writelen(opaque, addr, value, 0);
2408 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2410 #if defined(DEBUG_SUBPAGE)
2411 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2412 #endif
2414 return subpage_readlen(opaque, addr, 1);
2417 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2418 uint32_t value)
2420 #if defined(DEBUG_SUBPAGE)
2421 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2422 #endif
2423 subpage_writelen(opaque, addr, value, 1);
2426 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2428 #if defined(DEBUG_SUBPAGE)
2429 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2430 #endif
2432 return subpage_readlen(opaque, addr, 2);
2435 static void subpage_writel (void *opaque,
2436 target_phys_addr_t addr, uint32_t value)
2438 #if defined(DEBUG_SUBPAGE)
2439 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2440 #endif
2441 subpage_writelen(opaque, addr, value, 2);
2444 static CPUReadMemoryFunc *subpage_read[] = {
2445 &subpage_readb,
2446 &subpage_readw,
2447 &subpage_readl,
2450 static CPUWriteMemoryFunc *subpage_write[] = {
2451 &subpage_writeb,
2452 &subpage_writew,
2453 &subpage_writel,
2456 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2457 int memory)
2459 int idx, eidx;
2460 unsigned int i;
2462 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2463 return -1;
2464 idx = SUBPAGE_IDX(start);
2465 eidx = SUBPAGE_IDX(end);
2466 #if defined(DEBUG_SUBPAGE)
2467 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2468 mmio, start, end, idx, eidx, memory);
2469 #endif
2470 memory >>= IO_MEM_SHIFT;
2471 for (; idx <= eidx; idx++) {
2472 for (i = 0; i < 4; i++) {
2473 if (io_mem_read[memory][i]) {
2474 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2475 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2477 if (io_mem_write[memory][i]) {
2478 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2479 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2484 return 0;
2487 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2488 int orig_memory)
2490 subpage_t *mmio;
2491 int subpage_memory;
2493 mmio = qemu_mallocz(sizeof(subpage_t));
2494 if (mmio != NULL) {
2495 mmio->base = base;
2496 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2497 #if defined(DEBUG_SUBPAGE)
2498 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2499 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2500 #endif
2501 *phys = subpage_memory | IO_MEM_SUBPAGE;
2502 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2505 return mmio;
2508 static int get_free_io_mem_idx(void)
2510 int i;
2512 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2513 if (!io_mem_used[i]) {
2514 io_mem_used[i] = 1;
2515 return i;
2518 return -1;
2521 static void io_mem_init(void)
2523 int i;
2525 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2526 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2527 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2528 for (i=0; i<5; i++)
2529 io_mem_used[i] = 1;
2531 #if defined(CONFIG_SOFTMMU)
2532 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2533 watch_mem_write, NULL);
2534 #endif
2535 /* alloc dirty bits array */
2536 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2537 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
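/* Editorial note: io_mem_init() installs the built-in handlers at their fixed
   io_mem indices and marks entries 0..4 as used, so get_free_io_mem_idx()
   only hands out indices above them.  The dirty bitmap allocated here holds
   one byte of dirty flags per target page and starts out all-dirty (0xff). */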
2540 /* mem_read and mem_write are arrays of functions containing the
2541 function to access byte (index 0), word (index 1) and dword (index
2542 2). Functions can be omitted with a NULL function pointer. The
2543 registered functions may be modified dynamically later.
2544 If io_index is positive, the corresponding io zone is
2545 modified. If it is zero or negative, a new io zone is allocated. The
2546 return value can be used with cpu_register_physical_memory(). (-1) is
2547 returned on error. */
2548 int cpu_register_io_memory(int io_index,
2549 CPUReadMemoryFunc **mem_read,
2550 CPUWriteMemoryFunc **mem_write,
2551 void *opaque)
2553 int i, subwidth = 0;
2555 if (io_index <= 0) {
2556 io_index = get_free_io_mem_idx();
2557 if (io_index == -1)
2558 return io_index;
2559 } else {
2560 if (io_index >= IO_MEM_NB_ENTRIES)
2561 return -1;
2564 for(i = 0;i < 3; i++) {
2565 if (!mem_read[i] || !mem_write[i])
2566 subwidth = IO_MEM_SUBWIDTH;
2567 io_mem_read[io_index][i] = mem_read[i];
2568 io_mem_write[io_index][i] = mem_write[i];
2570 io_mem_opaque[io_index] = opaque;
2571 return (io_index << IO_MEM_SHIFT) | subwidth;
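/* Illustrative usage (editorial; the device names are made up): a device
   model registers its MMIO callbacks and then maps them onto guest physical
   memory, e.g.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int iomem = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base_addr, 0x1000, iomem);

   A NULL entry in either table tags the returned value with IO_MEM_SUBWIDTH,
   which makes cpu_register_physical_memory() route the page through the
   subpage machinery above. */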
2574 void cpu_unregister_io_memory(int io_table_address)
2576 int i;
2577 int io_index = io_table_address >> IO_MEM_SHIFT;
2579 for (i=0;i < 3; i++) {
2580 io_mem_read[io_index][i] = unassigned_mem_read[i];
2581 io_mem_write[io_index][i] = unassigned_mem_write[i];
2583 io_mem_opaque[io_index] = NULL;
2584 io_mem_used[io_index] = 0;
2587 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2589 return io_mem_write[io_index >> IO_MEM_SHIFT];
2592 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2594 return io_mem_read[io_index >> IO_MEM_SHIFT];
2597 /* physical memory access (slow version, mainly for debug) */
2598 #if defined(CONFIG_USER_ONLY)
2599 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2600 int len, int is_write)
2602 int l, flags;
2603 target_ulong page;
2604 void * p;
2606 while (len > 0) {
2607 page = addr & TARGET_PAGE_MASK;
2608 l = (page + TARGET_PAGE_SIZE) - addr;
2609 if (l > len)
2610 l = len;
2611 flags = page_get_flags(page);
2612 if (!(flags & PAGE_VALID))
2613 return;
2614 if (is_write) {
2615 if (!(flags & PAGE_WRITE))
2616 return;
2617 /* XXX: this code should not depend on lock_user */
2618 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2619 /* FIXME - should this return an error rather than just fail? */
2620 return;
2621 memcpy(p, buf, len);
2622 unlock_user(p, addr, len);
2623 } else {
2624 if (!(flags & PAGE_READ))
2625 return;
2626 /* XXX: this code should not depend on lock_user */
2627 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2628 /* FIXME - should this return an error rather than just fail? */
2629 return;
2630 memcpy(buf, p, len);
2631 unlock_user(p, addr, 0);
2633 len -= l;
2634 buf += l;
2635 addr += l;
2639 #else
2640 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2641 int len, int is_write)
2643 int l, io_index;
2644 uint8_t *ptr;
2645 uint32_t val;
2646 target_phys_addr_t page;
2647 unsigned long pd;
2648 PhysPageDesc *p;
2650 while (len > 0) {
2651 page = addr & TARGET_PAGE_MASK;
2652 l = (page + TARGET_PAGE_SIZE) - addr;
2653 if (l > len)
2654 l = len;
2655 p = phys_page_find(page >> TARGET_PAGE_BITS);
2656 if (!p) {
2657 pd = IO_MEM_UNASSIGNED;
2658 } else {
2659 pd = p->phys_offset;
2662 if (is_write) {
2663 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2664 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2665 /* XXX: could force cpu_single_env to NULL to avoid
2666 potential bugs */
2667 if (l >= 4 && ((addr & 3) == 0)) {
2668 /* 32 bit write access */
2669 val = ldl_p(buf);
2670 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2671 l = 4;
2672 } else if (l >= 2 && ((addr & 1) == 0)) {
2673 /* 16 bit write access */
2674 val = lduw_p(buf);
2675 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2676 l = 2;
2677 } else {
2678 /* 8 bit write access */
2679 val = ldub_p(buf);
2680 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2681 l = 1;
2683 } else {
2684 unsigned long addr1;
2685 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2686 /* RAM case */
2687 ptr = phys_ram_base + addr1;
2688 memcpy(ptr, buf, l);
2689 if (!cpu_physical_memory_is_dirty(addr1)) {
2690 /* invalidate code */
2691 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2692 /* set dirty bit */
2693 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2694 (0xff & ~CODE_DIRTY_FLAG);
2696 /* qemu doesn't execute guest code directly, but kvm does,
2697 so flush the instruction caches */
2698 if (kvm_enabled())
2699 flush_icache_range((unsigned long)ptr,
2700 ((unsigned long)ptr)+l);
2702 } else {
2703 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2704 !(pd & IO_MEM_ROMD)) {
2705 /* I/O case */
2706 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2707 if (l >= 4 && ((addr & 3) == 0)) {
2708 /* 32 bit read access */
2709 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2710 stl_p(buf, val);
2711 l = 4;
2712 } else if (l >= 2 && ((addr & 1) == 0)) {
2713 /* 16 bit read access */
2714 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2715 stw_p(buf, val);
2716 l = 2;
2717 } else {
2718 /* 8 bit read access */
2719 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2720 stb_p(buf, val);
2721 l = 1;
2723 } else {
2724 /* RAM case */
2725 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2726 (addr & ~TARGET_PAGE_MASK);
2727 memcpy(buf, ptr, l);
2730 len -= l;
2731 buf += l;
2732 addr += l;
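/* Editorial summary of cpu_physical_memory_rw() above: the request is walked
   page by page.  Unmapped pages fall back to IO_MEM_UNASSIGNED, I/O pages are
   dispatched through io_mem_read/io_mem_write using the widest aligned access
   that fits (4, then 2, then 1 byte), and plain RAM is copied directly, with
   writes additionally invalidating translated code, updating the dirty bitmap
   and, when kvm is enabled, flushing the host instruction cache. */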
2736 /* used for ROM loading: can write to RAM and ROM */
2737 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2738 const uint8_t *buf, int len)
2740 int l;
2741 uint8_t *ptr;
2742 target_phys_addr_t page;
2743 unsigned long pd;
2744 PhysPageDesc *p;
2746 while (len > 0) {
2747 page = addr & TARGET_PAGE_MASK;
2748 l = (page + TARGET_PAGE_SIZE) - addr;
2749 if (l > len)
2750 l = len;
2751 p = phys_page_find(page >> TARGET_PAGE_BITS);
2752 if (!p) {
2753 pd = IO_MEM_UNASSIGNED;
2754 } else {
2755 pd = p->phys_offset;
2758 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2759 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2760 !(pd & IO_MEM_ROMD)) {
2761 /* do nothing */
2762 } else {
2763 unsigned long addr1;
2764 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2765 /* ROM/RAM case */
2766 ptr = phys_ram_base + addr1;
2767 memcpy(ptr, buf, l);
2769 len -= l;
2770 buf += l;
2771 addr += l;
2776 /* warning: addr must be aligned */
2777 uint32_t ldl_phys(target_phys_addr_t addr)
2779 int io_index;
2780 uint8_t *ptr;
2781 uint32_t val;
2782 unsigned long pd;
2783 PhysPageDesc *p;
2785 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2786 if (!p) {
2787 pd = IO_MEM_UNASSIGNED;
2788 } else {
2789 pd = p->phys_offset;
2792 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2793 !(pd & IO_MEM_ROMD)) {
2794 /* I/O case */
2795 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2796 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2797 } else {
2798 /* RAM case */
2799 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2800 (addr & ~TARGET_PAGE_MASK);
2801 val = ldl_p(ptr);
2803 return val;
2806 /* warning: addr must be aligned */
2807 uint64_t ldq_phys(target_phys_addr_t addr)
2809 int io_index;
2810 uint8_t *ptr;
2811 uint64_t val;
2812 unsigned long pd;
2813 PhysPageDesc *p;
2815 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2816 if (!p) {
2817 pd = IO_MEM_UNASSIGNED;
2818 } else {
2819 pd = p->phys_offset;
2822 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2823 !(pd & IO_MEM_ROMD)) {
2824 /* I/O case */
2825 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2826 #ifdef TARGET_WORDS_BIGENDIAN
2827 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2828 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2829 #else
2830 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2831 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2832 #endif
2833 } else {
2834 /* RAM case */
2835 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2836 (addr & ~TARGET_PAGE_MASK);
2837 val = ldq_p(ptr);
2839 return val;
2842 /* XXX: optimize */
2843 uint32_t ldub_phys(target_phys_addr_t addr)
2845 uint8_t val;
2846 cpu_physical_memory_read(addr, &val, 1);
2847 return val;
2850 /* XXX: optimize */
2851 uint32_t lduw_phys(target_phys_addr_t addr)
2853 uint16_t val;
2854 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2855 return tswap16(val);
2858 #ifdef __GNUC__
2859 #define likely(x) __builtin_expect(!!(x), 1)
2860 #define unlikely(x) __builtin_expect(!!(x), 0)
2861 #else
2862 #define likely(x) (x)
2863 #define unlikely(x) (x)
2864 #endif
2866 /* warning: addr must be aligned. The ram page is not marked as dirty
2867 and the code inside is not invalidated. It is useful if the dirty
2868 bits are used to track modified PTEs */
2869 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2871 int io_index;
2872 uint8_t *ptr;
2873 unsigned long pd;
2874 PhysPageDesc *p;
2876 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2877 if (!p) {
2878 pd = IO_MEM_UNASSIGNED;
2879 } else {
2880 pd = p->phys_offset;
2883 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2884 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2885 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2886 } else {
2887 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2888 ptr = phys_ram_base + addr1;
2889 stl_p(ptr, val);
2891 if (unlikely(in_migration)) {
2892 if (!cpu_physical_memory_is_dirty(addr1)) {
2893 /* invalidate code */
2894 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2895 /* set dirty bit */
2896 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2897 (0xff & ~CODE_DIRTY_FLAG);
2903 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2905 int io_index;
2906 uint8_t *ptr;
2907 unsigned long pd;
2908 PhysPageDesc *p;
2910 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2911 if (!p) {
2912 pd = IO_MEM_UNASSIGNED;
2913 } else {
2914 pd = p->phys_offset;
2917 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2918 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2919 #ifdef TARGET_WORDS_BIGENDIAN
2920 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2921 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2922 #else
2923 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2924 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2925 #endif
2926 } else {
2927 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2928 (addr & ~TARGET_PAGE_MASK);
2929 stq_p(ptr, val);
2933 /* warning: addr must be aligned */
2934 void stl_phys(target_phys_addr_t addr, uint32_t val)
2936 int io_index;
2937 uint8_t *ptr;
2938 unsigned long pd;
2939 PhysPageDesc *p;
2941 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2942 if (!p) {
2943 pd = IO_MEM_UNASSIGNED;
2944 } else {
2945 pd = p->phys_offset;
2948 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2950 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2951 } else {
2952 unsigned long addr1;
2953 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2954 /* RAM case */
2955 ptr = phys_ram_base + addr1;
2956 stl_p(ptr, val);
2957 if (!cpu_physical_memory_is_dirty(addr1)) {
2958 /* invalidate code */
2959 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2960 /* set dirty bit */
2961 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2962 (0xff & ~CODE_DIRTY_FLAG);
2967 /* XXX: optimize */
2968 void stb_phys(target_phys_addr_t addr, uint32_t val)
2970 uint8_t v = val;
2971 cpu_physical_memory_write(addr, &v, 1);
2974 /* XXX: optimize */
2975 void stw_phys(target_phys_addr_t addr, uint32_t val)
2977 uint16_t v = tswap16(val);
2978 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2981 /* XXX: optimize */
2982 void stq_phys(target_phys_addr_t addr, uint64_t val)
2984 val = tswap64(val);
2985 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
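/* Illustrative usage (editorial; desc_base, pte_addr and new_pte are
   placeholders): device models and page-table walkers use these helpers to
   access guest-physical memory without going through a CPU TLB, e.g.

       uint32_t desc = ldl_phys(desc_base + 8);    // fetch a descriptor word
       stl_phys_notdirty(pte_addr, new_pte);       // PTE update that must not
                                                   // disturb the dirty bitmap
*/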
2988 #endif
2990 /* virtual memory access for debug */
2991 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2992 uint8_t *buf, int len, int is_write)
2994 int l;
2995 target_phys_addr_t phys_addr;
2996 target_ulong page;
2998 while (len > 0) {
2999 page = addr & TARGET_PAGE_MASK;
3000 phys_addr = cpu_get_phys_page_debug(env, page);
3001 /* if no physical page mapped, return an error */
3002 if (phys_addr == -1)
3003 return -1;
3004 l = (page + TARGET_PAGE_SIZE) - addr;
3005 if (l > len)
3006 l = len;
3007 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3008 buf, l, is_write);
3009 len -= l;
3010 buf += l;
3011 addr += l;
3013 return 0;
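/* Editorial note: cpu_memory_rw_debug() is the gdbstub/monitor path: it
   translates each guest-virtual page with cpu_get_phys_page_debug() and then
   reuses cpu_physical_memory_rw(), so debugger accesses never fault the
   guest or go through the softmmu TLB. */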
3016 void dump_exec_info(FILE *f,
3017 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3019 int i, target_code_size, max_target_code_size;
3020 int direct_jmp_count, direct_jmp2_count, cross_page;
3021 TranslationBlock *tb;
3023 target_code_size = 0;
3024 max_target_code_size = 0;
3025 cross_page = 0;
3026 direct_jmp_count = 0;
3027 direct_jmp2_count = 0;
3028 for(i = 0; i < nb_tbs; i++) {
3029 tb = &tbs[i];
3030 target_code_size += tb->size;
3031 if (tb->size > max_target_code_size)
3032 max_target_code_size = tb->size;
3033 if (tb->page_addr[1] != -1)
3034 cross_page++;
3035 if (tb->tb_next_offset[0] != 0xffff) {
3036 direct_jmp_count++;
3037 if (tb->tb_next_offset[1] != 0xffff) {
3038 direct_jmp2_count++;
3042 /* XXX: avoid using doubles? */
3043 cpu_fprintf(f, "Translation buffer state:\n");
3044 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3045 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3046 nb_tbs ? target_code_size / nb_tbs : 0,
3047 max_target_code_size);
3048 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3049 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3050 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3051 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3052 cross_page,
3053 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3054 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3055 direct_jmp_count,
3056 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3057 direct_jmp2_count,
3058 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3059 cpu_fprintf(f, "\nStatistics:\n");
3060 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3061 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3062 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3063 #ifdef CONFIG_PROFILER
3065 int64_t tot;
3066 tot = dyngen_interm_time + dyngen_code_time;
3067 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3068 tot, tot / 2.4e9);
3069 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3070 dyngen_tb_count,
3071 dyngen_tb_count1 - dyngen_tb_count,
3072 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3073 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3074 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3075 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
3076 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3077 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3078 dyngen_tb_count ?
3079 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3080 cpu_fprintf(f, "cycles/op %0.1f\n",
3081 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3082 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3083 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3084 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3085 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3086 if (tot == 0)
3087 tot = 1;
3088 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3089 (double)dyngen_interm_time / tot * 100.0);
3090 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3091 (double)dyngen_code_time / tot * 100.0);
3092 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3093 dyngen_restore_count);
3094 cpu_fprintf(f, " avg cycles %0.1f\n",
3095 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3097 extern void dump_op_count(void);
3098 dump_op_count();
3101 #endif
3104 #if !defined(CONFIG_USER_ONLY)
3106 #define MMUSUFFIX _cmmu
3107 #define GETPC() NULL
3108 #define env cpu_single_env
3109 #define SOFTMMU_CODE_ACCESS
3111 #define SHIFT 0
3112 #include "softmmu_template.h"
3114 #define SHIFT 1
3115 #include "softmmu_template.h"
3117 #define SHIFT 2
3118 #include "softmmu_template.h"
3120 #define SHIFT 3
3121 #include "softmmu_template.h"
3123 #undef env
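/* Editorial note: the block above instantiates the code-fetch ("_cmmu")
   variants of the softmmu accessors by including softmmu_template.h once per
   access size (SHIFT 0..3 for 1, 2, 4 and 8 byte accesses); MMUSUFFIX, GETPC
   and SOFTMMU_CODE_ACCESS are parameters consumed by the template, and env is
   temporarily aliased to cpu_single_env. */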
3125 #endif