Virtio-blk async IO
[qemu-kvm/fedora.git] / exec.c
blob c907d3e09bc2f6ec21f0cecc7a0fad1470c1ed12
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
39 #if !defined(NO_CPU_EMULATION)
40 #include "tcg-target.h"
41 #endif
43 #include "qemu-kvm.h"
44 #include "qemu-common.h"
46 #if defined(CONFIG_USER_ONLY)
47 #include <qemu.h>
48 #endif
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
67 /* threshold to flush the translated code buffer */
68 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
70 #define SMC_BITMAP_USE_THRESHOLD 10
72 #define MMAP_AREA_START 0x00000000
73 #define MMAP_AREA_END 0xa8000000
75 #if defined(TARGET_SPARC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 41
77 #elif defined(TARGET_SPARC)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #elif defined(TARGET_ALPHA)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #define TARGET_VIRT_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_PPC64)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
85 #define TARGET_PHYS_ADDR_SPACE_BITS 42
86 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
87 #define TARGET_PHYS_ADDR_SPACE_BITS 36
88 #else
89 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
90 #define TARGET_PHYS_ADDR_SPACE_BITS 32
91 #endif
93 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
94 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
95 int nb_tbs;
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
99 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
100 uint8_t *code_gen_ptr;
102 ram_addr_t phys_ram_size;
103 int phys_ram_fd;
104 uint8_t *phys_ram_base;
105 uint8_t *phys_ram_dirty;
106 uint8_t *bios_mem;
107 static int in_migration;
108 static ram_addr_t phys_ram_alloc_offset = 0;
110 CPUState *first_cpu;
111 /* current CPU in the current thread. It is only valid inside
112 cpu_exec() */
113 CPUState *cpu_single_env;
115 typedef struct PageDesc {
116 /* list of TBs intersecting this ram page */
117 TranslationBlock *first_tb;
118 /* in order to optimize self modifying code, we count the number
119 of lookups we do to a given page to use a bitmap */
120 unsigned int code_write_count;
121 uint8_t *code_bitmap;
122 #if defined(CONFIG_USER_ONLY)
123 unsigned long flags;
124 #endif
125 } PageDesc;
127 typedef struct PhysPageDesc {
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 ram_addr_t phys_offset;
130 } PhysPageDesc;
132 #define L2_BITS 10
133 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
134 /* XXX: this is a temporary hack for alpha target.
135 * In the future, this is to be replaced by a multi-level table
136 * to actually be able to handle the complete 64 bits address space.
138 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
139 #else
140 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
141 #endif
143 #define L1_SIZE (1 << L1_BITS)
144 #define L2_SIZE (1 << L2_BITS)
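/* Illustrative sketch (kept under #if 0): how a page index is split across
   the two-level l1_map / l1_phys_map lookup defined above.  The EX_* names
   are hypothetical stand-ins for TARGET_PAGE_BITS/L2_BITS with typical
   32-bit values; the real lookups are page_find_alloc() and
   phys_page_find_alloc() below. */
#if 0
#include <stdio.h>

#define EX_TARGET_PAGE_BITS 12
#define EX_L2_BITS          10
#define EX_L2_SIZE          (1 << EX_L2_BITS)

int main(void)
{
    unsigned long vaddr = 0x08049abcUL;
    unsigned long index = vaddr >> EX_TARGET_PAGE_BITS;  /* page index   */
    unsigned long l1    = index >> EX_L2_BITS;           /* first level  */
    unsigned long l2    = index & (EX_L2_SIZE - 1);      /* second level */
    printf("vaddr 0x%08lx -> l1_map[%lu] + %lu\n", vaddr, l1, l2);
    return 0;
}
#endif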
146 static void io_mem_init(void);
148 unsigned long qemu_real_host_page_size;
149 unsigned long qemu_host_page_bits;
150 unsigned long qemu_host_page_size;
151 unsigned long qemu_host_page_mask;
153 /* XXX: for system emulation, it could just be an array */
154 static PageDesc *l1_map[L1_SIZE];
155 PhysPageDesc **l1_phys_map;
157 /* io memory support */
158 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
159 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
160 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
161 char io_mem_used[IO_MEM_NB_ENTRIES];
162 #if defined(CONFIG_SOFTMMU)
163 static int io_mem_watch;
164 #endif
166 /* log support */
167 char *logfilename = "/tmp/qemu.log";
168 FILE *logfile;
169 int loglevel;
170 static int log_append = 0;
172 /* statistics */
173 static int tlb_flush_count;
174 static int tb_flush_count;
175 static int tb_phys_invalidate_count;
177 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
178 typedef struct subpage_t {
179 target_phys_addr_t base;
180 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
181 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
182 void *opaque[TARGET_PAGE_SIZE][2][4];
183 } subpage_t;
185 static void page_init(void)
187 /* NOTE: we can always suppose that qemu_host_page_size >=
188 TARGET_PAGE_SIZE */
189 #ifdef _WIN32
191 SYSTEM_INFO system_info;
192 DWORD old_protect;
194 GetSystemInfo(&system_info);
195 qemu_real_host_page_size = system_info.dwPageSize;
197 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
198 PAGE_EXECUTE_READWRITE, &old_protect);
200 #else
201 qemu_real_host_page_size = getpagesize();
203 unsigned long start, end;
205 start = (unsigned long)code_gen_buffer;
206 start &= ~(qemu_real_host_page_size - 1);
208 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
209 end += qemu_real_host_page_size - 1;
210 end &= ~(qemu_real_host_page_size - 1);
212 mprotect((void *)start, end - start,
213 PROT_READ | PROT_WRITE | PROT_EXEC);
215 #endif
217 if (qemu_host_page_size == 0)
218 qemu_host_page_size = qemu_real_host_page_size;
219 if (qemu_host_page_size < TARGET_PAGE_SIZE)
220 qemu_host_page_size = TARGET_PAGE_SIZE;
221 qemu_host_page_bits = 0;
222 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
223 qemu_host_page_bits++;
224 qemu_host_page_mask = ~(qemu_host_page_size - 1);
225 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
226 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
228 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
230 long long startaddr, endaddr;
231 FILE *f;
232 int n;
234 f = fopen("/proc/self/maps", "r");
235 if (f) {
236 do {
237 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
238 if (n == 2) {
239 startaddr = MIN(startaddr,
240 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
241 endaddr = MIN(endaddr,
242 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
243 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
244 TARGET_PAGE_ALIGN(endaddr),
245 PAGE_RESERVED);
247 } while (!feof(f));
248 fclose(f);
251 #endif
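/* Illustrative sketch (kept under #if 0): the host-page rounding applied to
   code_gen_buffer in page_init() above, shown on its own.
   ex_round_to_pages() is a hypothetical helper; the real code inlines the
   same arithmetic before calling mprotect(). */
#if 0
#include <stddef.h>
#include <stdint.h>

static void ex_round_to_pages(uintptr_t buf, size_t len, size_t page_size,
                              uintptr_t *start, uintptr_t *end)
{
    *start = buf & ~(page_size - 1);                         /* round down */
    *end   = (buf + len + page_size - 1) & ~(page_size - 1); /* round up   */
}
#endif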
254 static inline PageDesc *page_find_alloc(target_ulong index)
256 PageDesc **lp, *p;
258 lp = &l1_map[index >> L2_BITS];
259 p = *lp;
260 if (!p) {
261 /* allocate if not found */
262 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
263 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
264 *lp = p;
266 return p + (index & (L2_SIZE - 1));
269 static inline PageDesc *page_find(target_ulong index)
271 PageDesc *p;
273 p = l1_map[index >> L2_BITS];
274 if (!p)
275 return 0;
276 return p + (index & (L2_SIZE - 1));
279 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
281 void **lp, **p;
282 PhysPageDesc *pd;
284 p = (void **)l1_phys_map;
285 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
287 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
288 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
289 #endif
290 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
291 p = *lp;
292 if (!p) {
293 /* allocate if not found */
294 if (!alloc)
295 return NULL;
296 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
297 memset(p, 0, sizeof(void *) * L1_SIZE);
298 *lp = p;
300 #endif
301 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
302 pd = *lp;
303 if (!pd) {
304 int i;
305 /* allocate if not found */
306 if (!alloc)
307 return NULL;
308 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
309 *lp = pd;
310 for (i = 0; i < L2_SIZE; i++)
311 pd[i].phys_offset = IO_MEM_UNASSIGNED;
313 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
316 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
318 return phys_page_find_alloc(index, 0);
321 #if !defined(CONFIG_USER_ONLY)
322 static void tlb_protect_code(ram_addr_t ram_addr);
323 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
324 target_ulong vaddr);
325 #endif
327 void cpu_exec_init(CPUState *env)
329 CPUState **penv;
330 int cpu_index;
332 if (!code_gen_ptr) {
333 cpu_gen_init();
334 code_gen_ptr = code_gen_buffer;
335 page_init();
336 io_mem_init();
338 env->next_cpu = NULL;
339 penv = &first_cpu;
340 cpu_index = 0;
341 while (*penv != NULL) {
342 penv = (CPUState **)&(*penv)->next_cpu;
343 cpu_index++;
345 env->cpu_index = cpu_index;
346 env->nb_watchpoints = 0;
347 #ifdef _WIN32
348 env->thread_id = GetCurrentProcessId();
349 #else
350 env->thread_id = getpid();
351 #endif
352 *penv = env;
355 static inline void invalidate_page_bitmap(PageDesc *p)
357 if (p->code_bitmap) {
358 qemu_free(p->code_bitmap);
359 p->code_bitmap = NULL;
361 p->code_write_count = 0;
364 /* set to NULL all the 'first_tb' fields in all PageDescs */
365 static void page_flush_tb(void)
367 int i, j;
368 PageDesc *p;
370 for(i = 0; i < L1_SIZE; i++) {
371 p = l1_map[i];
372 if (p) {
373 for(j = 0; j < L2_SIZE; j++) {
374 p->first_tb = NULL;
375 invalidate_page_bitmap(p);
376 p++;
382 /* flush all the translation blocks */
383 /* XXX: tb_flush is currently not thread safe */
384 void tb_flush(CPUState *env1)
386 CPUState *env;
387 #if defined(DEBUG_FLUSH)
388 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
389 (unsigned long)(code_gen_ptr - code_gen_buffer),
390 nb_tbs, nb_tbs > 0 ?
391 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
392 #endif
393 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
394 cpu_abort(env1, "Internal error: code buffer overflow\n");
396 nb_tbs = 0;
398 for(env = first_cpu; env != NULL; env = env->next_cpu) {
399 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
402 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
403 page_flush_tb();
405 code_gen_ptr = code_gen_buffer;
406 /* XXX: flush processor icache at this point if cache flush is
407 expensive */
408 tb_flush_count++;
411 #ifdef DEBUG_TB_CHECK
413 static void tb_invalidate_check(target_ulong address)
415 TranslationBlock *tb;
416 int i;
417 address &= TARGET_PAGE_MASK;
418 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
419 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
420 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
421 address >= tb->pc + tb->size)) {
422 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
423 address, (long)tb->pc, tb->size);
429 /* verify that all the pages have correct rights for code */
430 static void tb_page_check(void)
432 TranslationBlock *tb;
433 int i, flags1, flags2;
435 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
436 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
437 flags1 = page_get_flags(tb->pc);
438 flags2 = page_get_flags(tb->pc + tb->size - 1);
439 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
440 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
441 (long)tb->pc, tb->size, flags1, flags2);
447 void tb_jmp_check(TranslationBlock *tb)
449 TranslationBlock *tb1;
450 unsigned int n1;
452 /* suppress any remaining jumps to this TB */
453 tb1 = tb->jmp_first;
454 for(;;) {
455 n1 = (long)tb1 & 3;
456 tb1 = (TranslationBlock *)((long)tb1 & ~3);
457 if (n1 == 2)
458 break;
459 tb1 = tb1->jmp_next[n1];
461 /* check end of list */
462 if (tb1 != tb) {
463 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
467 #endif
469 /* invalidate one TB */
470 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
471 int next_offset)
473 TranslationBlock *tb1;
474 for(;;) {
475 tb1 = *ptb;
476 if (tb1 == tb) {
477 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
478 break;
480 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
484 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
486 TranslationBlock *tb1;
487 unsigned int n1;
489 for(;;) {
490 tb1 = *ptb;
491 n1 = (long)tb1 & 3;
492 tb1 = (TranslationBlock *)((long)tb1 & ~3);
493 if (tb1 == tb) {
494 *ptb = tb1->page_next[n1];
495 break;
497 ptb = &tb1->page_next[n1];
501 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
503 TranslationBlock *tb1, **ptb;
504 unsigned int n1;
506 ptb = &tb->jmp_next[n];
507 tb1 = *ptb;
508 if (tb1) {
509 /* find tb(n) in circular list */
510 for(;;) {
511 tb1 = *ptb;
512 n1 = (long)tb1 & 3;
513 tb1 = (TranslationBlock *)((long)tb1 & ~3);
514 if (n1 == n && tb1 == tb)
515 break;
516 if (n1 == 2) {
517 ptb = &tb1->jmp_first;
518 } else {
519 ptb = &tb1->jmp_next[n1];
522 /* now we can suppress tb(n) from the list */
523 *ptb = tb->jmp_next[n];
525 tb->jmp_next[n] = NULL;
529 /* reset the jump entry 'n' of a TB so that it is not chained to
530 another TB */
531 static inline void tb_reset_jump(TranslationBlock *tb, int n)
533 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
536 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
538 CPUState *env;
539 PageDesc *p;
540 unsigned int h, n1;
541 target_phys_addr_t phys_pc;
542 TranslationBlock *tb1, *tb2;
544 /* remove the TB from the hash list */
545 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
546 h = tb_phys_hash_func(phys_pc);
547 tb_remove(&tb_phys_hash[h], tb,
548 offsetof(TranslationBlock, phys_hash_next));
550 /* remove the TB from the page list */
551 if (tb->page_addr[0] != page_addr) {
552 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
553 tb_page_remove(&p->first_tb, tb);
554 invalidate_page_bitmap(p);
556 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
557 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
558 tb_page_remove(&p->first_tb, tb);
559 invalidate_page_bitmap(p);
562 tb_invalidated_flag = 1;
564 /* remove the TB from the hash list */
565 h = tb_jmp_cache_hash_func(tb->pc);
566 for(env = first_cpu; env != NULL; env = env->next_cpu) {
567 if (env->tb_jmp_cache[h] == tb)
568 env->tb_jmp_cache[h] = NULL;
571 /* suppress this TB from the two jump lists */
572 tb_jmp_remove(tb, 0);
573 tb_jmp_remove(tb, 1);
575 /* suppress any remaining jumps to this TB */
576 tb1 = tb->jmp_first;
577 for(;;) {
578 n1 = (long)tb1 & 3;
579 if (n1 == 2)
580 break;
581 tb1 = (TranslationBlock *)((long)tb1 & ~3);
582 tb2 = tb1->jmp_next[n1];
583 tb_reset_jump(tb1, n1);
584 tb1->jmp_next[n1] = NULL;
585 tb1 = tb2;
587 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
589 tb_phys_invalidate_count++;
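/* Illustrative sketch (kept under #if 0): the 2-bit pointer tagging used by
   tb->page_next[] and tb->jmp_first above.  TranslationBlock pointers are at
   least 4-byte aligned, so the low two bits encode which of the two pages
   (0 or 1) a link belongs to, and the value 2 marks the head of the circular
   jump list.  ex_tag()/ex_untag() are hypothetical helpers mirroring the
   (long)tb | n and (long)tb & ~3 expressions in the code. */
#if 0
static void *ex_tag(void *p, unsigned int n) { return (void *)((long)p | n); }
static void *ex_untag(void *p)               { return (void *)((long)p & ~3L); }
static unsigned int ex_tag_bits(void *p)     { return (unsigned int)((long)p & 3); }
#endif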
592 static inline void set_bits(uint8_t *tab, int start, int len)
594 int end, mask, end1;
596 end = start + len;
597 tab += start >> 3;
598 mask = 0xff << (start & 7);
599 if ((start & ~7) == (end & ~7)) {
600 if (start < end) {
601 mask &= ~(0xff << (end & 7));
602 *tab |= mask;
604 } else {
605 *tab++ |= mask;
606 start = (start + 8) & ~7;
607 end1 = end & ~7;
608 while (start < end1) {
609 *tab++ = 0xff;
610 start += 8;
612 if (start < end) {
613 mask = ~(0xff << (end & 7));
614 *tab |= mask;
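/* Illustrative sketch (kept under #if 0): what set_bits() computes.  Setting
   len = 10 bits starting at bit 3 in a 2-byte bitmap yields 0xf8 0x1f
   (LSB-first within each byte); build_page_bitmap() below uses this to mark
   the bytes of a page that are covered by translated code. */
#if 0
#include <assert.h>
#include <string.h>

static void ex_set_bits_demo(void)
{
    uint8_t tab[2];
    memset(tab, 0, sizeof(tab));
    set_bits(tab, 3, 10);           /* start = 3, len = 10 -> bits 3..12 */
    assert(tab[0] == 0xf8 && tab[1] == 0x1f);
}
#endif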
619 static void build_page_bitmap(PageDesc *p)
621 int n, tb_start, tb_end;
622 TranslationBlock *tb;
624 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
625 if (!p->code_bitmap)
626 return;
627 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
629 tb = p->first_tb;
630 while (tb != NULL) {
631 n = (long)tb & 3;
632 tb = (TranslationBlock *)((long)tb & ~3);
633 /* NOTE: this is subtle as a TB may span two physical pages */
634 if (n == 0) {
635 /* NOTE: tb_end may be after the end of the page, but
636 it is not a problem */
637 tb_start = tb->pc & ~TARGET_PAGE_MASK;
638 tb_end = tb_start + tb->size;
639 if (tb_end > TARGET_PAGE_SIZE)
640 tb_end = TARGET_PAGE_SIZE;
641 } else {
642 tb_start = 0;
643 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
645 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
646 tb = tb->page_next[n];
650 #ifdef TARGET_HAS_PRECISE_SMC
652 static void tb_gen_code(CPUState *env,
653 target_ulong pc, target_ulong cs_base, int flags,
654 int cflags)
656 TranslationBlock *tb;
657 uint8_t *tc_ptr;
658 target_ulong phys_pc, phys_page2, virt_page2;
659 int code_gen_size;
661 phys_pc = get_phys_addr_code(env, pc);
662 tb = tb_alloc(pc);
663 if (!tb) {
664 /* flush must be done */
665 tb_flush(env);
666 /* cannot fail at this point */
667 tb = tb_alloc(pc);
669 tc_ptr = code_gen_ptr;
670 tb->tc_ptr = tc_ptr;
671 tb->cs_base = cs_base;
672 tb->flags = flags;
673 tb->cflags = cflags;
674 cpu_gen_code(env, tb, &code_gen_size);
675 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
677 /* check next page if needed */
678 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
679 phys_page2 = -1;
680 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
681 phys_page2 = get_phys_addr_code(env, virt_page2);
683 tb_link_phys(tb, phys_pc, phys_page2);
685 #endif
687 /* invalidate all TBs which intersect with the target physical page
688 starting in range [start, end[. NOTE: start and end must refer to
689 the same physical page. 'is_cpu_write_access' should be true if called
690 from a real cpu write access: the virtual CPU will exit the current
691 TB if code is modified inside this TB. */
692 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
693 int is_cpu_write_access)
695 int n, current_tb_modified, current_tb_not_found, current_flags;
696 CPUState *env = cpu_single_env;
697 PageDesc *p;
698 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
699 target_ulong tb_start, tb_end;
700 target_ulong current_pc, current_cs_base;
702 p = page_find(start >> TARGET_PAGE_BITS);
703 if (!p)
704 return;
705 if (!p->code_bitmap &&
706 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
707 is_cpu_write_access) {
708 /* build code bitmap */
709 build_page_bitmap(p);
712 /* we remove all the TBs in the range [start, end[ */
713 /* XXX: see if in some cases it could be faster to invalidate all the code */
714 current_tb_not_found = is_cpu_write_access;
715 current_tb_modified = 0;
716 current_tb = NULL; /* avoid warning */
717 current_pc = 0; /* avoid warning */
718 current_cs_base = 0; /* avoid warning */
719 current_flags = 0; /* avoid warning */
720 tb = p->first_tb;
721 while (tb != NULL) {
722 n = (long)tb & 3;
723 tb = (TranslationBlock *)((long)tb & ~3);
724 tb_next = tb->page_next[n];
725 /* NOTE: this is subtle as a TB may span two physical pages */
726 if (n == 0) {
727 /* NOTE: tb_end may be after the end of the page, but
728 it is not a problem */
729 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
730 tb_end = tb_start + tb->size;
731 } else {
732 tb_start = tb->page_addr[1];
733 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
735 if (!(tb_end <= start || tb_start >= end)) {
736 #ifdef TARGET_HAS_PRECISE_SMC
737 if (current_tb_not_found) {
738 current_tb_not_found = 0;
739 current_tb = NULL;
740 if (env->mem_write_pc) {
741 /* now we have a real cpu fault */
742 current_tb = tb_find_pc(env->mem_write_pc);
745 if (current_tb == tb &&
746 !(current_tb->cflags & CF_SINGLE_INSN)) {
747 /* If we are modifying the current TB, we must stop
748 its execution. We could be more precise by checking
749 that the modification is after the current PC, but it
750 would require a specialized function to partially
751 restore the CPU state */
753 current_tb_modified = 1;
754 cpu_restore_state(current_tb, env,
755 env->mem_write_pc, NULL);
756 #if defined(TARGET_I386)
757 current_flags = env->hflags;
758 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
759 current_cs_base = (target_ulong)env->segs[R_CS].base;
760 current_pc = current_cs_base + env->eip;
761 #else
762 #error unsupported CPU
763 #endif
765 #endif /* TARGET_HAS_PRECISE_SMC */
766 /* we need to do that to handle the case where a signal
767 occurs while doing tb_phys_invalidate() */
768 saved_tb = NULL;
769 if (env) {
770 saved_tb = env->current_tb;
771 env->current_tb = NULL;
773 tb_phys_invalidate(tb, -1);
774 if (env) {
775 env->current_tb = saved_tb;
776 if (env->interrupt_request && env->current_tb)
777 cpu_interrupt(env, env->interrupt_request);
780 tb = tb_next;
782 #if !defined(CONFIG_USER_ONLY)
783 /* if no code remaining, no need to continue to use slow writes */
784 if (!p->first_tb) {
785 invalidate_page_bitmap(p);
786 if (is_cpu_write_access) {
787 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
790 #endif
791 #ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb_modified) {
793 /* we generate a block containing just the instruction
794 modifying the memory. It will ensure that it cannot modify
795 itself */
796 env->current_tb = NULL;
797 tb_gen_code(env, current_pc, current_cs_base, current_flags,
798 CF_SINGLE_INSN);
799 cpu_resume_from_signal(env, NULL);
801 #endif
804 /* len must be <= 8 and start must be a multiple of len */
805 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
807 PageDesc *p;
808 int offset, b;
809 #if 0
810 if (1) {
811 if (loglevel) {
812 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
813 cpu_single_env->mem_write_vaddr, len,
814 cpu_single_env->eip,
815 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
818 #endif
819 p = page_find(start >> TARGET_PAGE_BITS);
820 if (!p)
821 return;
822 if (p->code_bitmap) {
823 offset = start & ~TARGET_PAGE_MASK;
824 b = p->code_bitmap[offset >> 3] >> (offset & 7);
825 if (b & ((1 << len) - 1))
826 goto do_invalidate;
827 } else {
828 do_invalidate:
829 tb_invalidate_phys_page_range(start, start + len, 1);
833 #if !defined(CONFIG_SOFTMMU)
834 static void tb_invalidate_phys_page(target_phys_addr_t addr,
835 unsigned long pc, void *puc)
837 int n, current_flags, current_tb_modified;
838 target_ulong current_pc, current_cs_base;
839 PageDesc *p;
840 TranslationBlock *tb, *current_tb;
841 #ifdef TARGET_HAS_PRECISE_SMC
842 CPUState *env = cpu_single_env;
843 #endif
845 addr &= TARGET_PAGE_MASK;
846 p = page_find(addr >> TARGET_PAGE_BITS);
847 if (!p)
848 return;
849 tb = p->first_tb;
850 current_tb_modified = 0;
851 current_tb = NULL;
852 current_pc = 0; /* avoid warning */
853 current_cs_base = 0; /* avoid warning */
854 current_flags = 0; /* avoid warning */
855 #ifdef TARGET_HAS_PRECISE_SMC
856 if (tb && pc != 0) {
857 current_tb = tb_find_pc(pc);
859 #endif
860 while (tb != NULL) {
861 n = (long)tb & 3;
862 tb = (TranslationBlock *)((long)tb & ~3);
863 #ifdef TARGET_HAS_PRECISE_SMC
864 if (current_tb == tb &&
865 !(current_tb->cflags & CF_SINGLE_INSN)) {
866 /* If we are modifying the current TB, we must stop
867 its execution. We could be more precise by checking
868 that the modification is after the current PC, but it
869 would require a specialized function to partially
870 restore the CPU state */
872 current_tb_modified = 1;
873 cpu_restore_state(current_tb, env, pc, puc);
874 #if defined(TARGET_I386)
875 current_flags = env->hflags;
876 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
877 current_cs_base = (target_ulong)env->segs[R_CS].base;
878 current_pc = current_cs_base + env->eip;
879 #else
880 #error unsupported CPU
881 #endif
883 #endif /* TARGET_HAS_PRECISE_SMC */
884 tb_phys_invalidate(tb, addr);
885 tb = tb->page_next[n];
887 p->first_tb = NULL;
888 #ifdef TARGET_HAS_PRECISE_SMC
889 if (current_tb_modified) {
890 /* we generate a block containing just the instruction
891 modifying the memory. It will ensure that it cannot modify
892 itself */
893 env->current_tb = NULL;
894 tb_gen_code(env, current_pc, current_cs_base, current_flags,
895 CF_SINGLE_INSN);
896 cpu_resume_from_signal(env, puc);
898 #endif
900 #endif
902 /* add the tb in the target page and protect it if necessary */
903 static inline void tb_alloc_page(TranslationBlock *tb,
904 unsigned int n, target_ulong page_addr)
906 PageDesc *p;
907 TranslationBlock *last_first_tb;
909 tb->page_addr[n] = page_addr;
910 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
911 tb->page_next[n] = p->first_tb;
912 last_first_tb = p->first_tb;
913 p->first_tb = (TranslationBlock *)((long)tb | n);
914 invalidate_page_bitmap(p);
916 #if defined(TARGET_HAS_SMC) || 1
918 #if defined(CONFIG_USER_ONLY)
919 if (p->flags & PAGE_WRITE) {
920 target_ulong addr;
921 PageDesc *p2;
922 int prot;
924 /* force the host page as non writable (writes will have a
925 page fault + mprotect overhead) */
926 page_addr &= qemu_host_page_mask;
927 prot = 0;
928 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
929 addr += TARGET_PAGE_SIZE) {
931 p2 = page_find (addr >> TARGET_PAGE_BITS);
932 if (!p2)
933 continue;
934 prot |= p2->flags;
935 p2->flags &= ~PAGE_WRITE;
936 page_get_flags(addr);
938 mprotect(g2h(page_addr), qemu_host_page_size,
939 (prot & PAGE_BITS) & ~PAGE_WRITE);
940 #ifdef DEBUG_TB_INVALIDATE
941 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
942 page_addr);
943 #endif
945 #else
946 /* if some code is already present, then the pages are already
947 protected. So we handle the case where only the first TB is
948 allocated in a physical page */
949 if (!last_first_tb) {
950 tlb_protect_code(page_addr);
952 #endif
954 #endif /* TARGET_HAS_SMC */
957 /* Allocate a new translation block. Flush the translation buffer if
958 too many translation blocks or too much generated code. */
959 TranslationBlock *tb_alloc(target_ulong pc)
961 TranslationBlock *tb;
963 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
964 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
965 return NULL;
966 tb = &tbs[nb_tbs++];
967 tb->pc = pc;
968 tb->cflags = 0;
969 return tb;
972 /* add a new TB and link it to the physical page tables. phys_page2 is
973 (-1) to indicate that only one page contains the TB. */
974 void tb_link_phys(TranslationBlock *tb,
975 target_ulong phys_pc, target_ulong phys_page2)
977 unsigned int h;
978 TranslationBlock **ptb;
980 /* add in the physical hash table */
981 h = tb_phys_hash_func(phys_pc);
982 ptb = &tb_phys_hash[h];
983 tb->phys_hash_next = *ptb;
984 *ptb = tb;
986 /* add in the page list */
987 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
988 if (phys_page2 != -1)
989 tb_alloc_page(tb, 1, phys_page2);
990 else
991 tb->page_addr[1] = -1;
993 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
994 tb->jmp_next[0] = NULL;
995 tb->jmp_next[1] = NULL;
997 /* init original jump addresses */
998 if (tb->tb_next_offset[0] != 0xffff)
999 tb_reset_jump(tb, 0);
1000 if (tb->tb_next_offset[1] != 0xffff)
1001 tb_reset_jump(tb, 1);
1003 #ifdef DEBUG_TB_CHECK
1004 tb_page_check();
1005 #endif
1008 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1009 tb[1].tc_ptr. Return NULL if not found */
1010 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1012 int m_min, m_max, m;
1013 unsigned long v;
1014 TranslationBlock *tb;
1016 if (nb_tbs <= 0)
1017 return NULL;
1018 if (tc_ptr < (unsigned long)code_gen_buffer ||
1019 tc_ptr >= (unsigned long)code_gen_ptr)
1020 return NULL;
1021 /* binary search (cf Knuth) */
1022 m_min = 0;
1023 m_max = nb_tbs - 1;
1024 while (m_min <= m_max) {
1025 m = (m_min + m_max) >> 1;
1026 tb = &tbs[m];
1027 v = (unsigned long)tb->tc_ptr;
1028 if (v == tc_ptr)
1029 return tb;
1030 else if (tc_ptr < v) {
1031 m_max = m - 1;
1032 } else {
1033 m_min = m + 1;
1036 return &tbs[m_max];
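/* Illustrative sketch (kept under #if 0): the search above is a "greatest
   element <= key" binary search over tbs[], which is sorted by tc_ptr
   because the code buffer is filled linearly.  ex_floor_search() is a
   hypothetical standalone version on a plain array of addresses. */
#if 0
static int ex_floor_search(const unsigned long *a, int n, unsigned long key)
{
    int lo = 0, hi = n - 1;
    while (lo <= hi) {
        int mid = (lo + hi) >> 1;
        if (a[mid] == key)
            return mid;
        if (key < a[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return hi;                  /* index of greatest a[i] <= key, or -1 */
}
#endif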
1039 static void tb_reset_jump_recursive(TranslationBlock *tb);
1041 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1043 TranslationBlock *tb1, *tb_next, **ptb;
1044 unsigned int n1;
1046 tb1 = tb->jmp_next[n];
1047 if (tb1 != NULL) {
1048 /* find head of list */
1049 for(;;) {
1050 n1 = (long)tb1 & 3;
1051 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1052 if (n1 == 2)
1053 break;
1054 tb1 = tb1->jmp_next[n1];
1056 /* we are now sure that tb jumps to tb1 */
1057 tb_next = tb1;
1059 /* remove tb from the jmp_first list */
1060 ptb = &tb_next->jmp_first;
1061 for(;;) {
1062 tb1 = *ptb;
1063 n1 = (long)tb1 & 3;
1064 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1065 if (n1 == n && tb1 == tb)
1066 break;
1067 ptb = &tb1->jmp_next[n1];
1069 *ptb = tb->jmp_next[n];
1070 tb->jmp_next[n] = NULL;
1072 /* suppress the jump to next tb in generated code */
1073 tb_reset_jump(tb, n);
1075 /* suppress jumps in the tb on which we could have jumped */
1076 tb_reset_jump_recursive(tb_next);
1080 static void tb_reset_jump_recursive(TranslationBlock *tb)
1082 tb_reset_jump_recursive2(tb, 0);
1083 tb_reset_jump_recursive2(tb, 1);
1086 #if defined(TARGET_HAS_ICE)
1087 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1089 target_phys_addr_t addr;
1090 target_ulong pd;
1091 ram_addr_t ram_addr;
1092 PhysPageDesc *p;
1094 addr = cpu_get_phys_page_debug(env, pc);
1095 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1096 if (!p) {
1097 pd = IO_MEM_UNASSIGNED;
1098 } else {
1099 pd = p->phys_offset;
1101 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1102 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1104 #endif
1106 /* Add a watchpoint. */
1107 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1109 int i;
1111 for (i = 0; i < env->nb_watchpoints; i++) {
1112 if (addr == env->watchpoint[i].vaddr)
1113 return 0;
1115 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1116 return -1;
1118 i = env->nb_watchpoints++;
1119 env->watchpoint[i].vaddr = addr;
1120 tlb_flush_page(env, addr);
1121 /* FIXME: This flush is needed because of the hack to make memory ops
1122 terminate the TB. It can be removed once the proper IO trap and
1123 re-execute bits are in. */
1124 tb_flush(env);
1125 return i;
1128 /* Remove a watchpoint. */
1129 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1131 int i;
1133 for (i = 0; i < env->nb_watchpoints; i++) {
1134 if (addr == env->watchpoint[i].vaddr) {
1135 env->nb_watchpoints--;
1136 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1137 tlb_flush_page(env, addr);
1138 return 0;
1141 return -1;
1144 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1145 breakpoint is reached */
1146 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1148 #if defined(TARGET_HAS_ICE)
1149 int i;
1151 for(i = 0; i < env->nb_breakpoints; i++) {
1152 if (env->breakpoints[i] == pc)
1153 return 0;
1156 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1157 return -1;
1158 env->breakpoints[env->nb_breakpoints++] = pc;
1160 if (kvm_enabled())
1161 kvm_update_debugger(env);
1163 breakpoint_invalidate(env, pc);
1164 return 0;
1165 #else
1166 return -1;
1167 #endif
1170 /* remove a breakpoint */
1171 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1173 #if defined(TARGET_HAS_ICE)
1174 int i;
1175 for(i = 0; i < env->nb_breakpoints; i++) {
1176 if (env->breakpoints[i] == pc)
1177 goto found;
1179 return -1;
1180 found:
1181 env->nb_breakpoints--;
1182 if (i < env->nb_breakpoints)
1183 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1185 if (kvm_enabled())
1186 kvm_update_debugger(env);
1188 breakpoint_invalidate(env, pc);
1189 return 0;
1190 #else
1191 return -1;
1192 #endif
1195 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1196 CPU loop after each instruction */
1197 void cpu_single_step(CPUState *env, int enabled)
1199 #if defined(TARGET_HAS_ICE)
1200 if (env->singlestep_enabled != enabled) {
1201 env->singlestep_enabled = enabled;
1202 /* must flush all the translated code to avoid inconsistencies */
1203 /* XXX: only flush what is necessary */
1204 tb_flush(env);
1206 if (kvm_enabled())
1207 kvm_update_debugger(env);
1208 #endif
1211 /* enable or disable low-level logging */
1212 void cpu_set_log(int log_flags)
1214 loglevel = log_flags;
1215 if (loglevel && !logfile) {
1216 logfile = fopen(logfilename, log_append ? "a" : "w");
1217 if (!logfile) {
1218 perror(logfilename);
1219 _exit(1);
1221 #if !defined(CONFIG_SOFTMMU)
1222 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1224 static uint8_t logfile_buf[4096];
1225 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1227 #else
1228 setvbuf(logfile, NULL, _IOLBF, 0);
1229 #endif
1230 log_append = 1;
1232 if (!loglevel && logfile) {
1233 fclose(logfile);
1234 logfile = NULL;
1238 void cpu_set_log_filename(const char *filename)
1240 logfilename = strdup(filename);
1241 if (logfile) {
1242 fclose(logfile);
1243 logfile = NULL;
1245 cpu_set_log(loglevel);
1248 /* mask must never be zero, except for A20 change call */
1249 void cpu_interrupt(CPUState *env, int mask)
1251 TranslationBlock *tb;
1252 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1254 env->interrupt_request |= mask;
1255 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1256 kvm_update_interrupt_request(env);
1258 /* if the cpu is currently executing code, we must unlink it and
1259 all the potentially executing TB */
1260 tb = env->current_tb;
1261 if (tb && !testandset(&interrupt_lock)) {
1262 env->current_tb = NULL;
1263 tb_reset_jump_recursive(tb);
1264 resetlock(&interrupt_lock);
1268 void cpu_reset_interrupt(CPUState *env, int mask)
1270 env->interrupt_request &= ~mask;
1273 CPULogItem cpu_log_items[] = {
1274 { CPU_LOG_TB_OUT_ASM, "out_asm",
1275 "show generated host assembly code for each compiled TB" },
1276 { CPU_LOG_TB_IN_ASM, "in_asm",
1277 "show target assembly code for each compiled TB" },
1278 { CPU_LOG_TB_OP, "op",
1279 "show micro ops for each compiled TB" },
1280 { CPU_LOG_TB_OP_OPT, "op_opt",
1281 "show micro ops "
1282 #ifdef TARGET_I386
1283 "before eflags optimization and "
1284 #endif
1285 "after liveness analysis" },
1286 { CPU_LOG_INT, "int",
1287 "show interrupts/exceptions in short format" },
1288 { CPU_LOG_EXEC, "exec",
1289 "show trace before each executed TB (lots of logs)" },
1290 { CPU_LOG_TB_CPU, "cpu",
1291 "show CPU state before block translation" },
1292 #ifdef TARGET_I386
1293 { CPU_LOG_PCALL, "pcall",
1294 "show protected mode far calls/returns/exceptions" },
1295 #endif
1296 #ifdef DEBUG_IOPORT
1297 { CPU_LOG_IOPORT, "ioport",
1298 "show all i/o ports accesses" },
1299 #endif
1300 { 0, NULL, NULL },
1303 static int cmp1(const char *s1, int n, const char *s2)
1305 if (strlen(s2) != n)
1306 return 0;
1307 return memcmp(s1, s2, n) == 0;
1310 /* takes a comma separated list of log masks. Return 0 if error. */
1311 int cpu_str_to_log_mask(const char *str)
1313 CPULogItem *item;
1314 int mask;
1315 const char *p, *p1;
1317 p = str;
1318 mask = 0;
1319 for(;;) {
1320 p1 = strchr(p, ',');
1321 if (!p1)
1322 p1 = p + strlen(p);
1323 if(cmp1(p,p1-p,"all")) {
1324 for(item = cpu_log_items; item->mask != 0; item++) {
1325 mask |= item->mask;
1327 } else {
1328 for(item = cpu_log_items; item->mask != 0; item++) {
1329 if (cmp1(p, p1 - p, item->name))
1330 goto found;
1332 return 0;
1334 found:
1335 mask |= item->mask;
1336 if (*p1 != ',')
1337 break;
1338 p = p1 + 1;
1340 return mask;
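/* Illustrative sketch (kept under #if 0): typical use of the parser above,
   as done for the -d command line option.  The item names are only an
   example; the valid set comes from cpu_log_items[]. */
#if 0
static void ex_log_mask_demo(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0) {
        /* unknown item in the list: the caller prints the table and exits */
    } else {
        cpu_set_log(mask);      /* opens the log file and sets loglevel */
    }
}
#endif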
1343 void cpu_abort(CPUState *env, const char *fmt, ...)
1345 va_list ap;
1346 va_list ap2;
1348 va_start(ap, fmt);
1349 va_copy(ap2, ap);
1350 fprintf(stderr, "qemu: fatal: ");
1351 vfprintf(stderr, fmt, ap);
1352 fprintf(stderr, "\n");
1353 #ifdef TARGET_I386
1354 if(env->intercept & INTERCEPT_SVM_MASK) {
1355 /* most probably the virtual machine should not
1356 be shut down but rather caught by the VMM */
1357 vmexit(SVM_EXIT_SHUTDOWN, 0);
1359 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1360 #else
1361 cpu_dump_state(env, stderr, fprintf, 0);
1362 #endif
1363 if (logfile) {
1364 fprintf(logfile, "qemu: fatal: ");
1365 vfprintf(logfile, fmt, ap2);
1366 fprintf(logfile, "\n");
1367 #ifdef TARGET_I386
1368 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1369 #else
1370 cpu_dump_state(env, logfile, fprintf, 0);
1371 #endif
1372 fflush(logfile);
1373 fclose(logfile);
1375 va_end(ap2);
1376 va_end(ap);
1377 abort();
1380 CPUState *cpu_copy(CPUState *env)
1382 CPUState *new_env = cpu_init(env->cpu_model_str);
1383 /* preserve chaining and index */
1384 CPUState *next_cpu = new_env->next_cpu;
1385 int cpu_index = new_env->cpu_index;
1386 memcpy(new_env, env, sizeof(CPUState));
1387 new_env->next_cpu = next_cpu;
1388 new_env->cpu_index = cpu_index;
1389 return new_env;
1392 #if !defined(CONFIG_USER_ONLY)
1394 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1396 unsigned int i;
1398 /* Discard jump cache entries for any tb which might potentially
1399 overlap the flushed page. */
1400 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1401 memset (&env->tb_jmp_cache[i], 0,
1402 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1404 i = tb_jmp_cache_hash_page(addr);
1405 memset (&env->tb_jmp_cache[i], 0,
1406 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1409 /* NOTE: if flush_global is true, also flush global entries (not
1410 implemented yet) */
1411 void tlb_flush(CPUState *env, int flush_global)
1413 int i;
1415 #if defined(DEBUG_TLB)
1416 printf("tlb_flush:\n");
1417 #endif
1418 /* must reset current TB so that interrupts cannot modify the
1419 links while we are modifying them */
1420 env->current_tb = NULL;
1422 for(i = 0; i < CPU_TLB_SIZE; i++) {
1423 env->tlb_table[0][i].addr_read = -1;
1424 env->tlb_table[0][i].addr_write = -1;
1425 env->tlb_table[0][i].addr_code = -1;
1426 env->tlb_table[1][i].addr_read = -1;
1427 env->tlb_table[1][i].addr_write = -1;
1428 env->tlb_table[1][i].addr_code = -1;
1429 #if (NB_MMU_MODES >= 3)
1430 env->tlb_table[2][i].addr_read = -1;
1431 env->tlb_table[2][i].addr_write = -1;
1432 env->tlb_table[2][i].addr_code = -1;
1433 #if (NB_MMU_MODES == 4)
1434 env->tlb_table[3][i].addr_read = -1;
1435 env->tlb_table[3][i].addr_write = -1;
1436 env->tlb_table[3][i].addr_code = -1;
1437 #endif
1438 #endif
1441 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1443 #if !defined(CONFIG_SOFTMMU)
1444 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1445 #endif
1446 #ifdef USE_KQEMU
1447 if (env->kqemu_enabled) {
1448 kqemu_flush(env, flush_global);
1450 #endif
1451 tlb_flush_count++;
1454 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1456 if (addr == (tlb_entry->addr_read &
1457 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1458 addr == (tlb_entry->addr_write &
1459 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1460 addr == (tlb_entry->addr_code &
1461 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1462 tlb_entry->addr_read = -1;
1463 tlb_entry->addr_write = -1;
1464 tlb_entry->addr_code = -1;
1468 void tlb_flush_page(CPUState *env, target_ulong addr)
1470 int i;
1472 #if defined(DEBUG_TLB)
1473 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1474 #endif
1475 /* must reset current TB so that interrupts cannot modify the
1476 links while we are modifying them */
1477 env->current_tb = NULL;
1479 addr &= TARGET_PAGE_MASK;
1480 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1481 tlb_flush_entry(&env->tlb_table[0][i], addr);
1482 tlb_flush_entry(&env->tlb_table[1][i], addr);
1483 #if (NB_MMU_MODES >= 3)
1484 tlb_flush_entry(&env->tlb_table[2][i], addr);
1485 #if (NB_MMU_MODES == 4)
1486 tlb_flush_entry(&env->tlb_table[3][i], addr);
1487 #endif
1488 #endif
1490 tlb_flush_jmp_cache(env, addr);
1492 #if !defined(CONFIG_SOFTMMU)
1493 if (addr < MMAP_AREA_END)
1494 munmap((void *)addr, TARGET_PAGE_SIZE);
1495 #endif
1496 #ifdef USE_KQEMU
1497 if (env->kqemu_enabled) {
1498 kqemu_flush_page(env, addr);
1500 #endif
1503 /* update the TLBs so that writes to code in the virtual page 'addr'
1504 can be detected */
1505 static void tlb_protect_code(ram_addr_t ram_addr)
1507 cpu_physical_memory_reset_dirty(ram_addr,
1508 ram_addr + TARGET_PAGE_SIZE,
1509 CODE_DIRTY_FLAG);
1512 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1513 tested for self modifying code */
1514 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1515 target_ulong vaddr)
1517 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
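/* Illustrative sketch (kept under #if 0): phys_ram_dirty keeps one flag byte
   per target page.  tlb_protect_code() above clears CODE_DIRTY_FLAG so that
   guest writes to translated code are trapped, and tlb_unprotect_code_phys()
   sets it again once the page no longer needs SMC checks.  EX_PAGE_BITS and
   the flag value are hypothetical stand-ins for the real constants. */
#if 0
#define EX_PAGE_BITS        12
#define EX_CODE_DIRTY_FLAG  0x02

static int ex_page_is_code_dirty(const uint8_t *dirty, unsigned long ram_addr)
{
    return dirty[ram_addr >> EX_PAGE_BITS] & EX_CODE_DIRTY_FLAG;
}
#endif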
1520 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1521 unsigned long start, unsigned long length)
1523 unsigned long addr;
1524 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1525 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1526 if ((addr - start) < length) {
1527 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1532 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1533 int dirty_flags)
1535 CPUState *env;
1536 unsigned long length, start1;
1537 int i, mask, len;
1538 uint8_t *p;
1540 start &= TARGET_PAGE_MASK;
1541 end = TARGET_PAGE_ALIGN(end);
1543 length = end - start;
1544 if (length == 0)
1545 return;
1546 len = length >> TARGET_PAGE_BITS;
1547 #ifdef USE_KQEMU
1548 /* XXX: should not depend on cpu context */
1549 env = first_cpu;
1550 if (env->kqemu_enabled) {
1551 ram_addr_t addr;
1552 addr = start;
1553 for(i = 0; i < len; i++) {
1554 kqemu_set_notdirty(env, addr);
1555 addr += TARGET_PAGE_SIZE;
1558 #endif
1559 mask = ~dirty_flags;
1560 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1561 for(i = 0; i < len; i++)
1562 p[i] &= mask;
1564 /* we modify the TLB cache so that the dirty bit will be set again
1565 when accessing the range */
1566 start1 = start + (unsigned long)phys_ram_base;
1567 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1568 for(i = 0; i < CPU_TLB_SIZE; i++)
1569 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1570 for(i = 0; i < CPU_TLB_SIZE; i++)
1571 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1572 #if (NB_MMU_MODES >= 3)
1573 for(i = 0; i < CPU_TLB_SIZE; i++)
1574 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1575 #if (NB_MMU_MODES == 4)
1576 for(i = 0; i < CPU_TLB_SIZE; i++)
1577 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1578 #endif
1579 #endif
1582 #if !defined(CONFIG_SOFTMMU)
1583 /* XXX: this is expensive */
1585 VirtPageDesc *p;
1586 int j;
1587 target_ulong addr;
1589 for(i = 0; i < L1_SIZE; i++) {
1590 p = l1_virt_map[i];
1591 if (p) {
1592 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1593 for(j = 0; j < L2_SIZE; j++) {
1594 if (p->valid_tag == virt_valid_tag &&
1595 p->phys_addr >= start && p->phys_addr < end &&
1596 (p->prot & PROT_WRITE)) {
1597 if (addr < MMAP_AREA_END) {
1598 mprotect((void *)addr, TARGET_PAGE_SIZE,
1599 p->prot & ~PROT_WRITE);
1602 addr += TARGET_PAGE_SIZE;
1603 p++;
1608 #endif
1611 int cpu_physical_memory_set_dirty_tracking(int enable)
1613 int r=0;
1615 if (kvm_enabled())
1616 r = kvm_physical_memory_set_dirty_tracking(enable);
1617 in_migration = enable;
1618 return r;
1621 int cpu_physical_memory_get_dirty_tracking(void)
1623 return in_migration;
1626 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1628 ram_addr_t ram_addr;
1630 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1631 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1632 tlb_entry->addend - (unsigned long)phys_ram_base;
1633 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1634 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1639 /* update the TLB according to the current state of the dirty bits */
1640 void cpu_tlb_update_dirty(CPUState *env)
1642 int i;
1643 for(i = 0; i < CPU_TLB_SIZE; i++)
1644 tlb_update_dirty(&env->tlb_table[0][i]);
1645 for(i = 0; i < CPU_TLB_SIZE; i++)
1646 tlb_update_dirty(&env->tlb_table[1][i]);
1647 #if (NB_MMU_MODES >= 3)
1648 for(i = 0; i < CPU_TLB_SIZE; i++)
1649 tlb_update_dirty(&env->tlb_table[2][i]);
1650 #if (NB_MMU_MODES == 4)
1651 for(i = 0; i < CPU_TLB_SIZE; i++)
1652 tlb_update_dirty(&env->tlb_table[3][i]);
1653 #endif
1654 #endif
1657 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1658 unsigned long start)
1660 unsigned long addr;
1661 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1662 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1663 if (addr == start) {
1664 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1669 /* update the TLB corresponding to virtual page vaddr and phys addr
1670 addr so that it is no longer dirty */
1671 static inline void tlb_set_dirty(CPUState *env,
1672 unsigned long addr, target_ulong vaddr)
1674 int i;
1676 addr &= TARGET_PAGE_MASK;
1677 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1678 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1679 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1680 #if (NB_MMU_MODES >= 3)
1681 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1682 #if (NB_MMU_MODES == 4)
1683 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1684 #endif
1685 #endif
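/* Illustrative sketch (kept under #if 0): the direct-mapped software TLB
   indexing used throughout this file.  An entry is selected by the low bits
   of the virtual page number, and 'addend' is chosen so that
   guest_vaddr + addend is the host pointer for RAM pages.  The EX_* values
   are hypothetical stand-ins for TARGET_PAGE_BITS and CPU_TLB_SIZE. */
#if 0
#define EX_PAGE_BITS 12
#define EX_TLB_SIZE  256

static unsigned int ex_tlb_index(unsigned long vaddr)
{
    return (vaddr >> EX_PAGE_BITS) & (EX_TLB_SIZE - 1);
}

static void *ex_tlb_translate(unsigned long vaddr, unsigned long addend)
{
    return (void *)(vaddr + addend);  /* valid only after a matching lookup */
}
#endif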
1688 /* add a new TLB entry. At most one entry for a given virtual address
1689 is permitted. Return 0 if OK or 2 if the page could not be mapped
1690 (can only happen in non SOFTMMU mode for I/O pages or pages
1691 conflicting with the host address space). */
1692 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1693 target_phys_addr_t paddr, int prot,
1694 int mmu_idx, int is_softmmu)
1696 PhysPageDesc *p;
1697 unsigned long pd;
1698 unsigned int index;
1699 target_ulong address;
1700 target_phys_addr_t addend;
1701 int ret;
1702 CPUTLBEntry *te;
1703 int i;
1705 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1706 if (!p) {
1707 pd = IO_MEM_UNASSIGNED;
1708 } else {
1709 pd = p->phys_offset;
1711 #if defined(DEBUG_TLB)
1712 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1713 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1714 #endif
1716 ret = 0;
1717 #if !defined(CONFIG_SOFTMMU)
1718 if (is_softmmu)
1719 #endif
1721 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1722 /* IO memory case */
1723 address = vaddr | pd;
1724 addend = paddr;
1725 } else {
1726 /* standard memory */
1727 address = vaddr;
1728 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1731 /* Make accesses to pages with watchpoints go via the
1732 watchpoint trap routines. */
1733 for (i = 0; i < env->nb_watchpoints; i++) {
1734 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1735 if (address & ~TARGET_PAGE_MASK) {
1736 env->watchpoint[i].addend = 0;
1737 address = vaddr | io_mem_watch;
1738 } else {
1739 env->watchpoint[i].addend = pd - paddr +
1740 (unsigned long) phys_ram_base;
1741 /* TODO: Figure out how to make read watchpoints coexist
1742 with code. */
1743 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1748 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1749 addend -= vaddr;
1750 te = &env->tlb_table[mmu_idx][index];
1751 te->addend = addend;
1752 if (prot & PAGE_READ) {
1753 te->addr_read = address;
1754 } else {
1755 te->addr_read = -1;
1758 if (te->addr_code != -1) {
1759 tlb_flush_jmp_cache(env, te->addr_code);
1761 if (prot & PAGE_EXEC) {
1762 te->addr_code = address;
1763 } else {
1764 te->addr_code = -1;
1766 if (prot & PAGE_WRITE) {
1767 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1768 (pd & IO_MEM_ROMD)) {
1769 /* write access calls the I/O callback */
1770 te->addr_write = vaddr |
1771 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1772 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1773 !cpu_physical_memory_is_dirty(pd)) {
1774 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1775 } else {
1776 te->addr_write = address;
1778 } else {
1779 te->addr_write = -1;
1782 #if !defined(CONFIG_SOFTMMU)
1783 else {
1784 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1785 /* IO access: no mapping is done as it will be handled by the
1786 soft MMU */
1787 if (!(env->hflags & HF_SOFTMMU_MASK))
1788 ret = 2;
1789 } else {
1790 void *map_addr;
1792 if (vaddr >= MMAP_AREA_END) {
1793 ret = 2;
1794 } else {
1795 if (prot & PROT_WRITE) {
1796 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1797 #if defined(TARGET_HAS_SMC) || 1
1798 first_tb ||
1799 #endif
1800 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1801 !cpu_physical_memory_is_dirty(pd))) {
1802 /* ROM: we behave as if code were inside */
1803 /* if code is present, we only map as read only and save the
1804 original mapping */
1805 VirtPageDesc *vp;
1807 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1808 vp->phys_addr = pd;
1809 vp->prot = prot;
1810 vp->valid_tag = virt_valid_tag;
1811 prot &= ~PAGE_WRITE;
1814 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1815 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1816 if (map_addr == MAP_FAILED) {
1817 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1818 paddr, vaddr);
1823 #endif
1824 return ret;
1827 /* called from signal handler: invalidate the code and unprotect the
1828 page. Return TRUE if the fault was successfully handled. */
1829 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1831 #if !defined(CONFIG_SOFTMMU)
1832 VirtPageDesc *vp;
1834 #if defined(DEBUG_TLB)
1835 printf("page_unprotect: addr=0x%08x\n", addr);
1836 #endif
1837 addr &= TARGET_PAGE_MASK;
1839 /* if it is not mapped, no need to worry here */
1840 if (addr >= MMAP_AREA_END)
1841 return 0;
1842 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1843 if (!vp)
1844 return 0;
1845 /* NOTE: in this case, validate_tag is _not_ tested as it
1846 validates only the code TLB */
1847 if (vp->valid_tag != virt_valid_tag)
1848 return 0;
1849 if (!(vp->prot & PAGE_WRITE))
1850 return 0;
1851 #if defined(DEBUG_TLB)
1852 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1853 addr, vp->phys_addr, vp->prot);
1854 #endif
1855 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1856 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1857 (unsigned long)addr, vp->prot);
1858 /* set the dirty bit */
1859 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1860 /* flush the code inside */
1861 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1862 return 1;
1863 #else
1864 return 0;
1865 #endif
1868 #else
1870 void tlb_flush(CPUState *env, int flush_global)
1874 void tlb_flush_page(CPUState *env, target_ulong addr)
1878 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1879 target_phys_addr_t paddr, int prot,
1880 int mmu_idx, int is_softmmu)
1882 return 0;
1885 /* dump memory mappings */
1886 void page_dump(FILE *f)
1888 unsigned long start, end;
1889 int i, j, prot, prot1;
1890 PageDesc *p;
1892 fprintf(f, "%-8s %-8s %-8s %s\n",
1893 "start", "end", "size", "prot");
1894 start = -1;
1895 end = -1;
1896 prot = 0;
1897 for(i = 0; i <= L1_SIZE; i++) {
1898 if (i < L1_SIZE)
1899 p = l1_map[i];
1900 else
1901 p = NULL;
1902 for(j = 0;j < L2_SIZE; j++) {
1903 if (!p)
1904 prot1 = 0;
1905 else
1906 prot1 = p[j].flags;
1907 if (prot1 != prot) {
1908 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1909 if (start != -1) {
1910 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1911 start, end, end - start,
1912 prot & PAGE_READ ? 'r' : '-',
1913 prot & PAGE_WRITE ? 'w' : '-',
1914 prot & PAGE_EXEC ? 'x' : '-');
1916 if (prot1 != 0)
1917 start = end;
1918 else
1919 start = -1;
1920 prot = prot1;
1922 if (!p)
1923 break;
1928 int page_get_flags(target_ulong address)
1930 PageDesc *p;
1932 p = page_find(address >> TARGET_PAGE_BITS);
1933 if (!p)
1934 return 0;
1935 return p->flags;
1938 /* modify the flags of a page and invalidate the code if
1939 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1940 depending on PAGE_WRITE */
1941 void page_set_flags(target_ulong start, target_ulong end, int flags)
1943 PageDesc *p;
1944 target_ulong addr;
1946 start = start & TARGET_PAGE_MASK;
1947 end = TARGET_PAGE_ALIGN(end);
1948 if (flags & PAGE_WRITE)
1949 flags |= PAGE_WRITE_ORG;
1950 spin_lock(&tb_lock);
1951 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1952 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1953 /* if the write protection is set, then we invalidate the code
1954 inside */
1955 if (!(p->flags & PAGE_WRITE) &&
1956 (flags & PAGE_WRITE) &&
1957 p->first_tb) {
1958 tb_invalidate_phys_page(addr, 0, NULL);
1960 p->flags = flags;
1962 spin_unlock(&tb_lock);
1965 int page_check_range(target_ulong start, target_ulong len, int flags)
1967 PageDesc *p;
1968 target_ulong end;
1969 target_ulong addr;
1971 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1972 start = start & TARGET_PAGE_MASK;
1974 if( end < start )
1975 /* we've wrapped around */
1976 return -1;
1977 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1978 p = page_find(addr >> TARGET_PAGE_BITS);
1979 if( !p )
1980 return -1;
1981 if( !(p->flags & PAGE_VALID) )
1982 return -1;
1984 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1985 return -1;
1986 if (flags & PAGE_WRITE) {
1987 if (!(p->flags & PAGE_WRITE_ORG))
1988 return -1;
1989 /* unprotect the page if it was put read-only because it
1990 contains translated code */
1991 if (!(p->flags & PAGE_WRITE)) {
1992 if (!page_unprotect(addr, 0, NULL))
1993 return -1;
1995 return 0;
1998 return 0;
2001 /* called from signal handler: invalidate the code and unprotect the
2002 page. Return TRUE if the fault was successfully handled. */
2003 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2005 unsigned int page_index, prot, pindex;
2006 PageDesc *p, *p1;
2007 target_ulong host_start, host_end, addr;
2009 host_start = address & qemu_host_page_mask;
2010 page_index = host_start >> TARGET_PAGE_BITS;
2011 p1 = page_find(page_index);
2012 if (!p1)
2013 return 0;
2014 host_end = host_start + qemu_host_page_size;
2015 p = p1;
2016 prot = 0;
2017 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2018 prot |= p->flags;
2019 p++;
2021 /* if the page was really writable, then we change its
2022 protection back to writable */
2023 if (prot & PAGE_WRITE_ORG) {
2024 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2025 if (!(p1[pindex].flags & PAGE_WRITE)) {
2026 mprotect((void *)g2h(host_start), qemu_host_page_size,
2027 (prot & PAGE_BITS) | PAGE_WRITE);
2028 p1[pindex].flags |= PAGE_WRITE;
2029 /* and since the content will be modified, we must invalidate
2030 the corresponding translated code. */
2031 tb_invalidate_phys_page(address, pc, puc);
2032 #ifdef DEBUG_TB_CHECK
2033 tb_invalidate_check(address);
2034 #endif
2035 return 1;
2038 return 0;
2041 static inline void tlb_set_dirty(CPUState *env,
2042 unsigned long addr, target_ulong vaddr)
2045 #endif /* defined(CONFIG_USER_ONLY) */
2047 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2048 ram_addr_t memory);
2049 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2050 ram_addr_t orig_memory);
2051 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2052 need_subpage) \
2053 do { \
2054 if (addr > start_addr) \
2055 start_addr2 = 0; \
2056 else { \
2057 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2058 if (start_addr2 > 0) \
2059 need_subpage = 1; \
2062 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2063 end_addr2 = TARGET_PAGE_SIZE - 1; \
2064 else { \
2065 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2066 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2067 need_subpage = 1; \
2069 } while (0)
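/* Illustrative sketch (kept under #if 0): what CHECK_SUBPAGE decides.  For a
   4 KiB target page, registering a region of size 0x100 at
   start_addr = 0x1000200 gives start_addr2 = 0x200, end_addr2 = 0x2ff and
   need_subpage = 1 for the page containing it, so
   cpu_register_physical_memory() below routes that page through a subpage_t
   instead of a flat phys_offset.  The numbers are only an example. */
#if 0
static void ex_check_subpage_demo(void)
{
    target_phys_addr_t start_addr = 0x1000200, addr = start_addr;
    ram_addr_t orig_size = 0x100;
    target_phys_addr_t end_addr = start_addr + orig_size;
    target_phys_addr_t start_addr2, end_addr2;
    int need_subpage = 0;

    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    /* start_addr2 == 0x200, end_addr2 == 0x2ff, need_subpage == 1 */
}
#endif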
2071 /* register physical memory. 'size' must be a multiple of the target
2072 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2073 io memory page */
2074 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2075 ram_addr_t size,
2076 ram_addr_t phys_offset)
2078 target_phys_addr_t addr, end_addr;
2079 PhysPageDesc *p;
2080 CPUState *env;
2081 ram_addr_t orig_size = size;
2082 void *subpage;
2084 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2085 end_addr = start_addr + (target_phys_addr_t)size;
2086 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2087 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2088 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2089 ram_addr_t orig_memory = p->phys_offset;
2090 target_phys_addr_t start_addr2, end_addr2;
2091 int need_subpage = 0;
2093 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2094 need_subpage);
2095 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2096 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2097 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2098 &p->phys_offset, orig_memory);
2099 } else {
2100 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2101 >> IO_MEM_SHIFT];
2103 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2104 } else {
2105 p->phys_offset = phys_offset;
2106 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2107 (phys_offset & IO_MEM_ROMD))
2108 phys_offset += TARGET_PAGE_SIZE;
2110 } else {
2111 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2112 p->phys_offset = phys_offset;
2113 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2114 (phys_offset & IO_MEM_ROMD))
2115 phys_offset += TARGET_PAGE_SIZE;
2116 else {
2117 target_phys_addr_t start_addr2, end_addr2;
2118 int need_subpage = 0;
2120 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2121 end_addr2, need_subpage);
2123 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2124 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2125 &p->phys_offset, IO_MEM_UNASSIGNED);
2126 subpage_register(subpage, start_addr2, end_addr2,
2127 phys_offset);
2133 /* since each CPU stores ram addresses in its TLB cache, we must
2134 reset the modified entries */
2135 /* XXX: slow! */
2136 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2137 tlb_flush(env, 1);
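/* An illustrative sketch, kept out of the build with #if 0, of how board
   code typically calls cpu_register_physical_memory(): a RAM offset from
   qemu_ram_alloc() maps RAM, OR-ing in IO_MEM_ROM marks the region
   read-only, and an index returned by cpu_register_io_memory() maps MMIO.
   The addresses and sizes are hypothetical. */
#if 0
static void example_map_regions(ram_addr_t ram_offset, ram_addr_t rom_offset,
                                int mmio_io_index)
{
    /* 64 KiB of ordinary RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 0x10000, ram_offset | IO_MEM_RAM);

    /* 64 KiB of ROM at the top of the 32-bit physical address space */
    cpu_register_physical_memory(0xffff0000, 0x10000, rom_offset | IO_MEM_ROM);

    /* one page of MMIO dispatched to registered read/write callbacks */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, mmio_io_index);
}
#endif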
2141 /* XXX: temporary until new memory mapping API */
2142 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2144 PhysPageDesc *p;
2146 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2147 if (!p)
2148 return IO_MEM_UNASSIGNED;
2149 return p->phys_offset;
2152 /* XXX: better than nothing */
2153 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2155 ram_addr_t addr;
2156 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2157 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
2158 size, phys_ram_size);
2159 abort();
2161 addr = phys_ram_alloc_offset;
2162 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2163 return addr;
2166 void qemu_ram_free(ram_addr_t addr)
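/* An illustrative sketch, kept out of the build with #if 0, of the usual
   pairing of qemu_ram_alloc() with cpu_register_physical_memory() when
   setting up guest RAM; the size and base address are made up. */
#if 0
static void example_alloc_guest_ram(void)
{
    ram_addr_t offset;

    /* carve 16 MiB out of phys_ram_base; aborts if the pool is exhausted */
    offset = qemu_ram_alloc(16 * 1024 * 1024);

    /* make it visible to the guest at physical address 0 */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 offset | IO_MEM_RAM);
}
#endif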
2170 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2172 #ifdef DEBUG_UNASSIGNED
2173 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2174 #endif
2175 #ifdef TARGET_SPARC
2176 do_unassigned_access(addr, 0, 0, 0);
2177 #elif TARGET_CRIS
2178 do_unassigned_access(addr, 0, 0, 0);
2179 #endif
2180 return 0;
2183 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2185 #ifdef DEBUG_UNASSIGNED
2186 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2187 #endif
2188 #ifdef TARGET_SPARC
2189 do_unassigned_access(addr, 1, 0, 0);
2190 #elif TARGET_CRIS
2191 do_unassigned_access(addr, 1, 0, 0);
2192 #endif
2195 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2196 unassigned_mem_readb,
2197 unassigned_mem_readb,
2198 unassigned_mem_readb,
2201 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2202 unassigned_mem_writeb,
2203 unassigned_mem_writeb,
2204 unassigned_mem_writeb,
2207 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2209 unsigned long ram_addr;
2210 int dirty_flags;
2211 ram_addr = addr - (unsigned long)phys_ram_base;
2212 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2213 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2214 #if !defined(CONFIG_USER_ONLY)
2215 tb_invalidate_phys_page_fast(ram_addr, 1);
2216 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2217 #endif
2219 stb_p((uint8_t *)(long)addr, val);
2220 #ifdef USE_KQEMU
2221 if (cpu_single_env->kqemu_enabled &&
2222 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2223 kqemu_modify_page(cpu_single_env, ram_addr);
2224 #endif
2225 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2226 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2227 /* we remove the notdirty callback only if the code has been
2228 flushed */
2229 if (dirty_flags == 0xff)
2230 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2233 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2235 unsigned long ram_addr;
2236 int dirty_flags;
2237 ram_addr = addr - (unsigned long)phys_ram_base;
2238 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2239 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2240 #if !defined(CONFIG_USER_ONLY)
2241 tb_invalidate_phys_page_fast(ram_addr, 2);
2242 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2243 #endif
2245 stw_p((uint8_t *)(long)addr, val);
2246 #ifdef USE_KQEMU
2247 if (cpu_single_env->kqemu_enabled &&
2248 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2249 kqemu_modify_page(cpu_single_env, ram_addr);
2250 #endif
2251 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2252 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2253 /* we remove the notdirty callback only if the code has been
2254 flushed */
2255 if (dirty_flags == 0xff)
2256 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2259 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2261 unsigned long ram_addr;
2262 int dirty_flags;
2263 ram_addr = addr - (unsigned long)phys_ram_base;
2264 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2265 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2266 #if !defined(CONFIG_USER_ONLY)
2267 tb_invalidate_phys_page_fast(ram_addr, 4);
2268 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2269 #endif
2271 stl_p((uint8_t *)(long)addr, val);
2272 #ifdef USE_KQEMU
2273 if (cpu_single_env->kqemu_enabled &&
2274 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2275 kqemu_modify_page(cpu_single_env, ram_addr);
2276 #endif
2277 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2278 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2279 /* we remove the notdirty callback only if the code has been
2280 flushed */
2281 if (dirty_flags == 0xff)
2282 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2285 static CPUReadMemoryFunc *error_mem_read[3] = {
2286 NULL, /* never used */
2287 NULL, /* never used */
2288 NULL, /* never used */
2291 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2292 notdirty_mem_writeb,
2293 notdirty_mem_writew,
2294 notdirty_mem_writel,
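/* A small sketch, kept out of the build with #if 0, of how the dirty bitmap
   used by the handlers above is laid out: one byte of phys_ram_dirty per
   target page.  While CODE_DIRTY_FLAG is clear, translated code derived from
   the page may still be valid, which is why writes are routed through the
   notdirty handlers.  The helper name is hypothetical. */
#if 0
static int example_page_may_have_valid_code(ram_addr_t ram_addr)
{
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}
#endif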
2297 #if defined(CONFIG_SOFTMMU)
2298 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2299 so these check for a hit and then pass through to the normal out-of-line
2300 phys routines. */
2301 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2303 return ldub_phys(addr);
2306 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2308 return lduw_phys(addr);
2311 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2313 return ldl_phys(addr);
2316 /* Generate a debug exception if a watchpoint has been hit.
2317 Returns the real physical address of the access. addr will be a host
2318 address in case of a RAM location. */
2319 static target_ulong check_watchpoint(target_phys_addr_t addr)
2321 CPUState *env = cpu_single_env;
2322 target_ulong watch;
2323 target_ulong retaddr;
2324 int i;
2326 retaddr = addr;
2327 for (i = 0; i < env->nb_watchpoints; i++) {
2328 watch = env->watchpoint[i].vaddr;
2329 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2330 retaddr = addr - env->watchpoint[i].addend;
2331 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2332 cpu_single_env->watchpoint_hit = i + 1;
2333 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2334 break;
2338 return retaddr;
2341 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2342 uint32_t val)
2344 addr = check_watchpoint(addr);
2345 stb_phys(addr, val);
2348 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2349 uint32_t val)
2351 addr = check_watchpoint(addr);
2352 stw_phys(addr, val);
2355 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2356 uint32_t val)
2358 addr = check_watchpoint(addr);
2359 stl_phys(addr, val);
2362 static CPUReadMemoryFunc *watch_mem_read[3] = {
2363 watch_mem_readb,
2364 watch_mem_readw,
2365 watch_mem_readl,
2368 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2369 watch_mem_writeb,
2370 watch_mem_writew,
2371 watch_mem_writel,
2373 #endif
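/* An illustrative sketch, kept out of the build with #if 0, of the path a
   watched 32-bit store takes once its TLB entry has been redirected to the
   io_mem_watch handlers: check the watchpoints, raise CPU_INTERRUPT_DEBUG on
   a hit, then complete the access through stl_phys().  It assumes
   cpu_single_env points at the current CPU; the helper name is made up. */
#if 0
static void example_watched_store(target_phys_addr_t addr, uint32_t val)
{
    /* equivalent to: addr = check_watchpoint(addr); stl_phys(addr, val); */
    watch_mem_writel(NULL, addr, val);
}
#endif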
2375 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2376 unsigned int len)
2378 uint32_t ret;
2379 unsigned int idx;
2381 idx = SUBPAGE_IDX(addr - mmio->base);
2382 #if defined(DEBUG_SUBPAGE)
2383 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2384 mmio, len, addr, idx);
2385 #endif
2386 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2388 return ret;
2391 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2392 uint32_t value, unsigned int len)
2394 unsigned int idx;
2396 idx = SUBPAGE_IDX(addr - mmio->base);
2397 #if defined(DEBUG_SUBPAGE)
2398 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2399 mmio, len, addr, idx, value);
2400 #endif
2401 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2404 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2406 #if defined(DEBUG_SUBPAGE)
2407 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2408 #endif
2410 return subpage_readlen(opaque, addr, 0);
2413 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2414 uint32_t value)
2416 #if defined(DEBUG_SUBPAGE)
2417 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2418 #endif
2419 subpage_writelen(opaque, addr, value, 0);
2422 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2424 #if defined(DEBUG_SUBPAGE)
2425 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2426 #endif
2428 return subpage_readlen(opaque, addr, 1);
2431 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2432 uint32_t value)
2434 #if defined(DEBUG_SUBPAGE)
2435 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2436 #endif
2437 subpage_writelen(opaque, addr, value, 1);
2440 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2442 #if defined(DEBUG_SUBPAGE)
2443 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2444 #endif
2446 return subpage_readlen(opaque, addr, 2);
2449 static void subpage_writel (void *opaque,
2450 target_phys_addr_t addr, uint32_t value)
2452 #if defined(DEBUG_SUBPAGE)
2453 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2454 #endif
2455 subpage_writelen(opaque, addr, value, 2);
2458 static CPUReadMemoryFunc *subpage_read[] = {
2459 &subpage_readb,
2460 &subpage_readw,
2461 &subpage_readl,
2464 static CPUWriteMemoryFunc *subpage_write[] = {
2465 &subpage_writeb,
2466 &subpage_writew,
2467 &subpage_writel,
2470 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2471 ram_addr_t memory)
2473 int idx, eidx;
2474 unsigned int i;
2476 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2477 return -1;
2478 idx = SUBPAGE_IDX(start);
2479 eidx = SUBPAGE_IDX(end);
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2482 mmio, start, end, idx, eidx, memory);
2483 #endif
2484 memory >>= IO_MEM_SHIFT;
2485 for (; idx <= eidx; idx++) {
2486 for (i = 0; i < 4; i++) {
2487 if (io_mem_read[memory][i]) {
2488 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2489 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2491 if (io_mem_write[memory][i]) {
2492 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2493 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2498 return 0;
2501 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2502 ram_addr_t orig_memory)
2504 subpage_t *mmio;
2505 int subpage_memory;
2507 mmio = qemu_mallocz(sizeof(subpage_t));
2508 if (mmio != NULL) {
2509 mmio->base = base;
2510 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2511 #if defined(DEBUG_SUBPAGE)
2512 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2513 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2514 #endif
2515 *phys = subpage_memory | IO_MEM_SUBPAGE;
2516 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2519 return mmio;
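/* An illustrative sketch, kept out of the build with #if 0: two hypothetical
   devices mapped into the same target page.  Because each registration
   covers only part of the page, cpu_register_physical_memory() goes through
   subpage_init()/subpage_register() and accesses are dispatched per byte
   range as implemented above.  Addresses, sizes and io indexes are made
   up. */
#if 0
static void example_two_devices_in_one_page(int dev_a_io, int dev_b_io)
{
    cpu_register_physical_memory(0xf0000000, 0x80, dev_a_io);
    cpu_register_physical_memory(0xf0000080, 0x80, dev_b_io);
}
#endif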
2522 static int get_free_io_mem_idx(void)
2524 int i;
2526 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2527 if (!io_mem_used[i]) {
2528 io_mem_used[i] = 1;
2529 return i;
2532 return -1;
2535 static void io_mem_init(void)
2537 int i;
2539 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2540 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2541 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2542 for (i=0; i<5; i++)
2543 io_mem_used[i] = 1;
2545 #if defined(CONFIG_SOFTMMU)
2546 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2547 watch_mem_write, NULL);
2548 #endif
2549 /* alloc dirty bits array */
2550 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2551 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2554 /* mem_read and mem_write are arrays of functions used to access a
2555 byte (index 0), word (index 1) and dword (index 2). Functions can
2556 be omitted with a NULL function pointer. The registered functions
2557 may be modified dynamically later.
2558 If io_index is non-zero, the corresponding io zone is
2559 modified. If it is zero, a new io zone is allocated. The return
2560 value can be used with cpu_register_physical_memory(); (-1) is
2561 returned on error. */
2562 int cpu_register_io_memory(int io_index,
2563 CPUReadMemoryFunc **mem_read,
2564 CPUWriteMemoryFunc **mem_write,
2565 void *opaque)
2567 int i, subwidth = 0;
2569 if (io_index <= 0) {
2570 io_index = get_free_io_mem_idx();
2571 if (io_index == -1)
2572 return io_index;
2573 } else {
2574 if (io_index >= IO_MEM_NB_ENTRIES)
2575 return -1;
2578 for(i = 0;i < 3; i++) {
2579 if (!mem_read[i] || !mem_write[i])
2580 subwidth = IO_MEM_SUBWIDTH;
2581 io_mem_read[io_index][i] = mem_read[i];
2582 io_mem_write[io_index][i] = mem_write[i];
2584 io_mem_opaque[io_index] = opaque;
2585 return (io_index << IO_MEM_SHIFT) | subwidth;
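/* An illustrative sketch, kept out of the build with #if 0: registering a
   hypothetical device that only handles byte accesses.  The NULL entries for
   the other widths make cpu_register_io_memory() return an index tagged with
   IO_MEM_SUBWIDTH, and that index is then mapped with
   cpu_register_physical_memory().  All names and the address are made up. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;                       /* device-specific byte read */
}

static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device-specific byte write */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb,
    NULL,                           /* word reads not handled */
    NULL,                           /* dword reads not handled */
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb,
    NULL,
    NULL,
};

static void example_register_mydev(void *dev_state)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    if (io == -1)
        return;
    cpu_register_physical_memory(0xfe001000, TARGET_PAGE_SIZE, io);
}
#endif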
2588 void cpu_unregister_io_memory(int io_table_address)
2590 int i;
2591 int io_index = io_table_address >> IO_MEM_SHIFT;
2593 for (i=0;i < 3; i++) {
2594 io_mem_read[io_index][i] = unassigned_mem_read[i];
2595 io_mem_write[io_index][i] = unassigned_mem_write[i];
2597 io_mem_opaque[io_index] = NULL;
2598 io_mem_used[io_index] = 0;
2601 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2603 return io_mem_write[io_index >> IO_MEM_SHIFT];
2606 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2608 return io_mem_read[io_index >> IO_MEM_SHIFT];
2611 /* physical memory access (slow version, mainly for debug) */
2612 #if defined(CONFIG_USER_ONLY)
2613 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2614 int len, int is_write)
2616 int l, flags;
2617 target_ulong page;
2618 void * p;
2620 while (len > 0) {
2621 page = addr & TARGET_PAGE_MASK;
2622 l = (page + TARGET_PAGE_SIZE) - addr;
2623 if (l > len)
2624 l = len;
2625 flags = page_get_flags(page);
2626 if (!(flags & PAGE_VALID))
2627 return;
2628 if (is_write) {
2629 if (!(flags & PAGE_WRITE))
2630 return;
2631 /* XXX: this code should not depend on lock_user */
2632 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2633 /* FIXME - should this return an error rather than just fail? */
2634 return;
2635 memcpy(p, buf, l);
2636 unlock_user(p, addr, l);
2637 } else {
2638 if (!(flags & PAGE_READ))
2639 return;
2640 /* XXX: this code should not depend on lock_user */
2641 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2642 /* FIXME - should this return an error rather than just fail? */
2643 return;
2644 memcpy(buf, p, l);
2645 unlock_user(p, addr, 0);
2647 len -= l;
2648 buf += l;
2649 addr += l;
2653 #else
2654 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2655 int len, int is_write)
2657 int l, io_index;
2658 uint8_t *ptr;
2659 uint32_t val;
2660 target_phys_addr_t page;
2661 unsigned long pd;
2662 PhysPageDesc *p;
2664 while (len > 0) {
2665 page = addr & TARGET_PAGE_MASK;
2666 l = (page + TARGET_PAGE_SIZE) - addr;
2667 if (l > len)
2668 l = len;
2669 p = phys_page_find(page >> TARGET_PAGE_BITS);
2670 if (!p) {
2671 pd = IO_MEM_UNASSIGNED;
2672 } else {
2673 pd = p->phys_offset;
2676 if (is_write) {
2677 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2678 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2679 /* XXX: could force cpu_single_env to NULL to avoid
2680 potential bugs */
2681 if (l >= 4 && ((addr & 3) == 0)) {
2682 /* 32 bit write access */
2683 val = ldl_p(buf);
2684 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2685 l = 4;
2686 } else if (l >= 2 && ((addr & 1) == 0)) {
2687 /* 16 bit write access */
2688 val = lduw_p(buf);
2689 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2690 l = 2;
2691 } else {
2692 /* 8 bit write access */
2693 val = ldub_p(buf);
2694 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2695 l = 1;
2697 } else {
2698 unsigned long addr1;
2699 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2700 /* RAM case */
2701 ptr = phys_ram_base + addr1;
2702 memcpy(ptr, buf, l);
2703 if (!cpu_physical_memory_is_dirty(addr1)) {
2704 /* invalidate code */
2705 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2706 /* set dirty bit */
2707 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2708 (0xff & ~CODE_DIRTY_FLAG);
2710 /* qemu doesn't execute guest code directly, but kvm does;
2711 therefore flush the instruction caches */
2712 if (kvm_enabled())
2713 flush_icache_range((unsigned long)ptr,
2714 ((unsigned long)ptr)+l);
2716 } else {
2717 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2718 !(pd & IO_MEM_ROMD)) {
2719 /* I/O case */
2720 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2721 if (l >= 4 && ((addr & 3) == 0)) {
2722 /* 32 bit read access */
2723 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2724 stl_p(buf, val);
2725 l = 4;
2726 } else if (l >= 2 && ((addr & 1) == 0)) {
2727 /* 16 bit read access */
2728 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2729 stw_p(buf, val);
2730 l = 2;
2731 } else {
2732 /* 8 bit read access */
2733 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2734 stb_p(buf, val);
2735 l = 1;
2737 } else {
2738 /* RAM case */
2739 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2740 (addr & ~TARGET_PAGE_MASK);
2741 memcpy(buf, ptr, l);
2744 len -= l;
2745 buf += l;
2746 addr += l;
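/* An illustrative sketch, kept out of the build with #if 0: a device model
   copying a small buffer to and from guest physical memory through the slow
   path above, using the cpu_physical_memory_read()/write() wrappers around
   cpu_physical_memory_rw().  The helper name is made up. */
#if 0
static void example_dma_roundtrip(target_phys_addr_t guest_pa)
{
    uint8_t buf[64];

    cpu_physical_memory_read(guest_pa, buf, sizeof(buf));
    buf[0] ^= 0xff;                 /* modify the data */
    cpu_physical_memory_write(guest_pa, buf, sizeof(buf));
}
#endif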
2751 /* used for ROM loading: can write in RAM and ROM */
2751 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2752 const uint8_t *buf, int len)
2754 int l;
2755 uint8_t *ptr;
2756 target_phys_addr_t page;
2757 unsigned long pd;
2758 PhysPageDesc *p;
2760 while (len > 0) {
2761 page = addr & TARGET_PAGE_MASK;
2762 l = (page + TARGET_PAGE_SIZE) - addr;
2763 if (l > len)
2764 l = len;
2765 p = phys_page_find(page >> TARGET_PAGE_BITS);
2766 if (!p) {
2767 pd = IO_MEM_UNASSIGNED;
2768 } else {
2769 pd = p->phys_offset;
2772 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2773 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2774 !(pd & IO_MEM_ROMD)) {
2775 /* do nothing */
2776 } else {
2777 unsigned long addr1;
2778 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2779 /* ROM/RAM case */
2780 ptr = phys_ram_base + addr1;
2781 memcpy(ptr, buf, l);
2783 len -= l;
2784 buf += l;
2785 addr += l;
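/* An illustrative sketch, kept out of the build with #if 0: installing a
   firmware image into a region previously registered with IO_MEM_ROM, which
   ordinary cpu_physical_memory_rw() writes would skip.  The target address
   is hypothetical. */
#if 0
static void example_install_firmware(const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(0xfffe0000, image, image_size);
}
#endif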
2790 /* warning: addr must be aligned */
2791 uint32_t ldl_phys(target_phys_addr_t addr)
2793 int io_index;
2794 uint8_t *ptr;
2795 uint32_t val;
2796 unsigned long pd;
2797 PhysPageDesc *p;
2799 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2800 if (!p) {
2801 pd = IO_MEM_UNASSIGNED;
2802 } else {
2803 pd = p->phys_offset;
2806 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2807 !(pd & IO_MEM_ROMD)) {
2808 /* I/O case */
2809 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2810 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2811 } else {
2812 /* RAM case */
2813 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2814 (addr & ~TARGET_PAGE_MASK);
2815 val = ldl_p(ptr);
2817 return val;
2820 /* warning: addr must be aligned */
2821 uint64_t ldq_phys(target_phys_addr_t addr)
2823 int io_index;
2824 uint8_t *ptr;
2825 uint64_t val;
2826 unsigned long pd;
2827 PhysPageDesc *p;
2829 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2830 if (!p) {
2831 pd = IO_MEM_UNASSIGNED;
2832 } else {
2833 pd = p->phys_offset;
2836 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2837 !(pd & IO_MEM_ROMD)) {
2838 /* I/O case */
2839 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2840 #ifdef TARGET_WORDS_BIGENDIAN
2841 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2842 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2843 #else
2844 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2845 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2846 #endif
2847 } else {
2848 /* RAM case */
2849 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2850 (addr & ~TARGET_PAGE_MASK);
2851 val = ldq_p(ptr);
2853 return val;
2856 /* XXX: optimize */
2857 uint32_t ldub_phys(target_phys_addr_t addr)
2859 uint8_t val;
2860 cpu_physical_memory_read(addr, &val, 1);
2861 return val;
2864 /* XXX: optimize */
2865 uint32_t lduw_phys(target_phys_addr_t addr)
2867 uint16_t val;
2868 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2869 return tswap16(val);
2872 #ifdef __GNUC__
2873 #define likely(x) __builtin_expect(!!(x), 1)
2874 #define unlikely(x) __builtin_expect(!!(x), 0)
2875 #else
2876 #define likely(x) x
2877 #define unlikely(x) x
2878 #endif
2880 /* warning: addr must be aligned. The ram page is not masked as dirty
2881 and the code inside is not invalidated. It is useful if the dirty
2882 bits are used to track modified PTEs */
2883 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2885 int io_index;
2886 uint8_t *ptr;
2887 unsigned long pd;
2888 PhysPageDesc *p;
2890 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2891 if (!p) {
2892 pd = IO_MEM_UNASSIGNED;
2893 } else {
2894 pd = p->phys_offset;
2897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2899 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2900 } else {
2901 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2902 ptr = phys_ram_base + addr1;
2903 stl_p(ptr, val);
2905 if (unlikely(in_migration)) {
2906 if (!cpu_physical_memory_is_dirty(addr1)) {
2907 /* invalidate code */
2908 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2909 /* set dirty bit */
2910 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2911 (0xff & ~CODE_DIRTY_FLAG);
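/* An illustrative sketch, kept out of the build with #if 0: setting an
   "accessed" bit in a guest page-table entry with stl_phys_notdirty() so
   that the RAM dirty bitmap keeps reflecting only guest-initiated changes.
   The bit value 0x20 is an arbitrary example, not any particular
   architecture's PTE layout. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif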
2917 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2919 int io_index;
2920 uint8_t *ptr;
2921 unsigned long pd;
2922 PhysPageDesc *p;
2924 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2925 if (!p) {
2926 pd = IO_MEM_UNASSIGNED;
2927 } else {
2928 pd = p->phys_offset;
2931 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2932 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2933 #ifdef TARGET_WORDS_BIGENDIAN
2934 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2935 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2936 #else
2937 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2938 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2939 #endif
2940 } else {
2941 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2942 (addr & ~TARGET_PAGE_MASK);
2943 stq_p(ptr, val);
2947 /* warning: addr must be aligned */
2948 void stl_phys(target_phys_addr_t addr, uint32_t val)
2950 int io_index;
2951 uint8_t *ptr;
2952 unsigned long pd;
2953 PhysPageDesc *p;
2955 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2956 if (!p) {
2957 pd = IO_MEM_UNASSIGNED;
2958 } else {
2959 pd = p->phys_offset;
2962 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2963 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2964 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2965 } else {
2966 unsigned long addr1;
2967 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2968 /* RAM case */
2969 ptr = phys_ram_base + addr1;
2970 stl_p(ptr, val);
2971 if (!cpu_physical_memory_is_dirty(addr1)) {
2972 /* invalidate code */
2973 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2974 /* set dirty bit */
2975 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2976 (0xff & ~CODE_DIRTY_FLAG);
2981 /* XXX: optimize */
2982 void stb_phys(target_phys_addr_t addr, uint32_t val)
2984 uint8_t v = val;
2985 cpu_physical_memory_write(addr, &v, 1);
2988 /* XXX: optimize */
2989 void stw_phys(target_phys_addr_t addr, uint32_t val)
2991 uint16_t v = tswap16(val);
2992 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2995 /* XXX: optimize */
2996 void stq_phys(target_phys_addr_t addr, uint64_t val)
2998 val = tswap64(val);
2999 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3002 #endif
3004 /* virtual memory access for debug */
3005 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3006 uint8_t *buf, int len, int is_write)
3008 int l;
3009 target_phys_addr_t phys_addr;
3010 target_ulong page;
3012 while (len > 0) {
3013 page = addr & TARGET_PAGE_MASK;
3014 phys_addr = cpu_get_phys_page_debug(env, page);
3015 /* if no physical page mapped, return an error */
3016 if (phys_addr == -1)
3017 return -1;
3018 l = (page + TARGET_PAGE_SIZE) - addr;
3019 if (l > len)
3020 l = len;
3021 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3022 buf, l, is_write);
3023 len -= l;
3024 buf += l;
3025 addr += l;
3027 return 0;
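/* An illustrative sketch, kept out of the build with #if 0: reading guest
   virtual memory the way a debugger stub would; cpu_memory_rw_debug() walks
   the guest MMU page by page via cpu_get_phys_page_debug().  The helper name
   is made up. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns -1 as soon as an unmapped page is hit */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif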
3030 void dump_exec_info(FILE *f,
3031 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3033 int i, target_code_size, max_target_code_size;
3034 int direct_jmp_count, direct_jmp2_count, cross_page;
3035 TranslationBlock *tb;
3037 target_code_size = 0;
3038 max_target_code_size = 0;
3039 cross_page = 0;
3040 direct_jmp_count = 0;
3041 direct_jmp2_count = 0;
3042 for(i = 0; i < nb_tbs; i++) {
3043 tb = &tbs[i];
3044 target_code_size += tb->size;
3045 if (tb->size > max_target_code_size)
3046 max_target_code_size = tb->size;
3047 if (tb->page_addr[1] != -1)
3048 cross_page++;
3049 if (tb->tb_next_offset[0] != 0xffff) {
3050 direct_jmp_count++;
3051 if (tb->tb_next_offset[1] != 0xffff) {
3052 direct_jmp2_count++;
3056 /* XXX: avoid using doubles? */
3057 cpu_fprintf(f, "Translation buffer state:\n");
3058 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3059 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3060 nb_tbs ? target_code_size / nb_tbs : 0,
3061 max_target_code_size);
3062 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3063 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3064 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3065 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3066 cross_page,
3067 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3068 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3069 direct_jmp_count,
3070 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3071 direct_jmp2_count,
3072 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3073 cpu_fprintf(f, "\nStatistics:\n");
3074 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3075 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3076 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3077 #ifdef CONFIG_PROFILER
3079 int64_t tot;
3080 tot = dyngen_interm_time + dyngen_code_time;
3081 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3082 tot, tot / 2.4e9);
3083 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3084 dyngen_tb_count,
3085 dyngen_tb_count1 - dyngen_tb_count,
3086 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3087 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3088 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3089 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
3090 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3091 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3092 dyngen_tb_count ?
3093 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3094 cpu_fprintf(f, "cycles/op %0.1f\n",
3095 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3096 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3097 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3098 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3099 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3100 if (tot == 0)
3101 tot = 1;
3102 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3103 (double)dyngen_interm_time / tot * 100.0);
3104 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3105 (double)dyngen_code_time / tot * 100.0);
3106 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3107 dyngen_restore_count);
3108 cpu_fprintf(f, " avg cycles %0.1f\n",
3109 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3111 extern void dump_op_count(void);
3112 dump_op_count();
3115 #endif
3118 #if !defined(CONFIG_USER_ONLY)
3120 #define MMUSUFFIX _cmmu
3121 #define GETPC() NULL
3122 #define env cpu_single_env
3123 #define SOFTMMU_CODE_ACCESS
3125 #define SHIFT 0
3126 #include "softmmu_template.h"
3128 #define SHIFT 1
3129 #include "softmmu_template.h"
3131 #define SHIFT 2
3132 #include "softmmu_template.h"
3134 #define SHIFT 3
3135 #include "softmmu_template.h"
3137 #undef env
3139 #endif