[qemu/qemu_0_9_1_stable.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
51 //#define DEBUG_SUBPAGE
53 #if !defined(CONFIG_USER_ONLY)
54 /* TB consistency checks only implemented for usermode emulation. */
55 #undef DEBUG_TB_CHECK
56 #endif
58 /* threshold to flush the translated code buffer */
59 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #else
76 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77 #define TARGET_PHYS_ADDR_SPACE_BITS 32
78 #endif
80 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
81 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82 int nb_tbs;
83 /* any access to the tbs or the page table must use this lock */
84 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
87 uint8_t *code_gen_ptr;
89 int phys_ram_size;
90 int phys_ram_fd;
91 uint8_t *phys_ram_base;
92 uint8_t *phys_ram_dirty;
93 static ram_addr_t phys_ram_alloc_offset = 0;
95 CPUState *first_cpu;
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 CPUState *cpu_single_env;
100 typedef struct PageDesc {
101 /* list of TBs intersecting this ram page */
102 TranslationBlock *first_tb;
103 /* in order to optimize self modifying code, we count the number
104 of lookups we do to a given page to use a bitmap */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107 #if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109 #endif
110 } PageDesc;
112 typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
114 uint32_t phys_offset;
115 } PhysPageDesc;
117 #define L2_BITS 10
118 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119 /* XXX: this is a temporary hack for alpha target.
120 * In the future, this is to be replaced by a multi-level table
121 * to actually be able to handle the complete 64 bits address space.
123 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124 #else
125 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
126 #endif
128 #define L1_SIZE (1 << L1_BITS)
129 #define L2_SIZE (1 << L2_BITS)
131 static void io_mem_init(void);
133 unsigned long qemu_real_host_page_size;
134 unsigned long qemu_host_page_bits;
135 unsigned long qemu_host_page_size;
136 unsigned long qemu_host_page_mask;
138 /* XXX: for system emulation, it could just be an array */
139 static PageDesc *l1_map[L1_SIZE];
140 PhysPageDesc **l1_phys_map;
142 /* io memory support */
143 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
144 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
145 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
146 static int io_mem_nb;
147 #if defined(CONFIG_SOFTMMU)
148 static int io_mem_watch;
149 #endif
151 /* log support */
152 char *logfilename = "/tmp/qemu.log";
153 FILE *logfile;
154 int loglevel;
155 static int log_append = 0;
157 /* statistics */
158 static int tlb_flush_count;
159 static int tb_flush_count;
160 static int tb_phys_invalidate_count;
162 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
163 typedef struct subpage_t {
164 target_phys_addr_t base;
165 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
166 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
167 void *opaque[TARGET_PAGE_SIZE];
168 } subpage_t;
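/* initialize the page handling: probe the host page size, make the code
   generation buffer executable and allocate the l1 physical page map */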
170 static void page_init(void)
172 /* NOTE: we can always suppose that qemu_host_page_size >=
173 TARGET_PAGE_SIZE */
174 #ifdef _WIN32
176 SYSTEM_INFO system_info;
177 DWORD old_protect;
179 GetSystemInfo(&system_info);
180 qemu_real_host_page_size = system_info.dwPageSize;
182 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
183 PAGE_EXECUTE_READWRITE, &old_protect);
185 #else
186 qemu_real_host_page_size = getpagesize();
188 unsigned long start, end;
190 start = (unsigned long)code_gen_buffer;
191 start &= ~(qemu_real_host_page_size - 1);
193 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
194 end += qemu_real_host_page_size - 1;
195 end &= ~(qemu_real_host_page_size - 1);
197 mprotect((void *)start, end - start,
198 PROT_READ | PROT_WRITE | PROT_EXEC);
200 #endif
202 if (qemu_host_page_size == 0)
203 qemu_host_page_size = qemu_real_host_page_size;
204 if (qemu_host_page_size < TARGET_PAGE_SIZE)
205 qemu_host_page_size = TARGET_PAGE_SIZE;
206 qemu_host_page_bits = 0;
207 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
208 qemu_host_page_bits++;
209 qemu_host_page_mask = ~(qemu_host_page_size - 1);
210 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
211 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
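/* return the PageDesc for the given target page index, allocating the
   second level table if needed */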
214 static inline PageDesc *page_find_alloc(unsigned int index)
216 PageDesc **lp, *p;
218 lp = &l1_map[index >> L2_BITS];
219 p = *lp;
220 if (!p) {
221 /* allocate if not found */
222 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
223 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
224 *lp = p;
226 return p + (index & (L2_SIZE - 1));
229 static inline PageDesc *page_find(unsigned int index)
231 PageDesc *p;
233 p = l1_map[index >> L2_BITS];
234 if (!p)
235 return 0;
236 return p + (index & (L2_SIZE - 1));
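/* find the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing tables are created and new entries start as IO_MEM_UNASSIGNED */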
239 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
241 void **lp, **p;
242 PhysPageDesc *pd;
244 p = (void **)l1_phys_map;
245 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
247 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
248 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
249 #endif
250 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
251 p = *lp;
252 if (!p) {
253 /* allocate if not found */
254 if (!alloc)
255 return NULL;
256 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
257 memset(p, 0, sizeof(void *) * L1_SIZE);
258 *lp = p;
260 #endif
261 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
262 pd = *lp;
263 if (!pd) {
264 int i;
265 /* allocate if not found */
266 if (!alloc)
267 return NULL;
268 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
269 *lp = pd;
270 for (i = 0; i < L2_SIZE; i++)
271 pd[i].phys_offset = IO_MEM_UNASSIGNED;
273 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
276 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
278 return phys_page_find_alloc(index, 0);
281 #if !defined(CONFIG_USER_ONLY)
282 static void tlb_protect_code(ram_addr_t ram_addr);
283 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
284 target_ulong vaddr);
285 #endif
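/* initialize the exec machinery on first use and append 'env' to the
   global CPU list, assigning its cpu_index */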
287 void cpu_exec_init(CPUState *env)
289 CPUState **penv;
290 int cpu_index;
292 if (!code_gen_ptr) {
293 code_gen_ptr = code_gen_buffer;
294 page_init();
295 io_mem_init();
297 env->next_cpu = NULL;
298 penv = &first_cpu;
299 cpu_index = 0;
300 while (*penv != NULL) {
301 penv = (CPUState **)&(*penv)->next_cpu;
302 cpu_index++;
304 env->cpu_index = cpu_index;
305 env->nb_watchpoints = 0;
306 *penv = env;
309 static inline void invalidate_page_bitmap(PageDesc *p)
311 if (p->code_bitmap) {
312 qemu_free(p->code_bitmap);
313 p->code_bitmap = NULL;
315 p->code_write_count = 0;
318 /* set to NULL all the 'first_tb' fields in all PageDescs */
319 static void page_flush_tb(void)
321 int i, j;
322 PageDesc *p;
324 for(i = 0; i < L1_SIZE; i++) {
325 p = l1_map[i];
326 if (p) {
327 for(j = 0; j < L2_SIZE; j++) {
328 p->first_tb = NULL;
329 invalidate_page_bitmap(p);
330 p++;
336 /* flush all the translation blocks */
337 /* XXX: tb_flush is currently not thread safe */
338 void tb_flush(CPUState *env1)
340 CPUState *env;
341 #if defined(DEBUG_FLUSH)
342 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
343 code_gen_ptr - code_gen_buffer,
344 nb_tbs,
345 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
346 #endif
347 nb_tbs = 0;
349 for(env = first_cpu; env != NULL; env = env->next_cpu) {
350 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
353 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
354 page_flush_tb();
356 code_gen_ptr = code_gen_buffer;
357 /* XXX: flush processor icache at this point if cache flush is
358 expensive */
359 tb_flush_count++;
362 #ifdef DEBUG_TB_CHECK
364 static void tb_invalidate_check(target_ulong address)
366 TranslationBlock *tb;
367 int i;
368 address &= TARGET_PAGE_MASK;
369 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
370 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
371 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
372 address >= tb->pc + tb->size)) {
373 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
374 address, (long)tb->pc, tb->size);
380 /* verify that all the pages have correct rights for code */
381 static void tb_page_check(void)
383 TranslationBlock *tb;
384 int i, flags1, flags2;
386 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
387 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
388 flags1 = page_get_flags(tb->pc);
389 flags2 = page_get_flags(tb->pc + tb->size - 1);
390 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
391 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
392 (long)tb->pc, tb->size, flags1, flags2);
398 void tb_jmp_check(TranslationBlock *tb)
400 TranslationBlock *tb1;
401 unsigned int n1;
403 /* suppress any remaining jumps to this TB */
404 tb1 = tb->jmp_first;
405 for(;;) {
406 n1 = (long)tb1 & 3;
407 tb1 = (TranslationBlock *)((long)tb1 & ~3);
408 if (n1 == 2)
409 break;
410 tb1 = tb1->jmp_next[n1];
412 /* check end of list */
413 if (tb1 != tb) {
414 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
418 #endif
420 /* invalidate one TB */
421 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
422 int next_offset)
424 TranslationBlock *tb1;
425 for(;;) {
426 tb1 = *ptb;
427 if (tb1 == tb) {
428 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
429 break;
431 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
435 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
437 TranslationBlock *tb1;
438 unsigned int n1;
440 for(;;) {
441 tb1 = *ptb;
442 n1 = (long)tb1 & 3;
443 tb1 = (TranslationBlock *)((long)tb1 & ~3);
444 if (tb1 == tb) {
445 *ptb = tb1->page_next[n1];
446 break;
448 ptb = &tb1->page_next[n1];
452 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
454 TranslationBlock *tb1, **ptb;
455 unsigned int n1;
457 ptb = &tb->jmp_next[n];
458 tb1 = *ptb;
459 if (tb1) {
460 /* find tb(n) in circular list */
461 for(;;) {
462 tb1 = *ptb;
463 n1 = (long)tb1 & 3;
464 tb1 = (TranslationBlock *)((long)tb1 & ~3);
465 if (n1 == n && tb1 == tb)
466 break;
467 if (n1 == 2) {
468 ptb = &tb1->jmp_first;
469 } else {
470 ptb = &tb1->jmp_next[n1];
473 /* now we can suppress tb(n) from the list */
474 *ptb = tb->jmp_next[n];
476 tb->jmp_next[n] = NULL;
480 /* reset the jump entry 'n' of a TB so that it is not chained to
481 another TB */
482 static inline void tb_reset_jump(TranslationBlock *tb, int n)
484 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
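/* invalidate one TB: remove it from the hash table, the page lists,
   the per-CPU jump caches and the jump chains */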
487 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
489 CPUState *env;
490 PageDesc *p;
491 unsigned int h, n1;
492 target_ulong phys_pc;
493 TranslationBlock *tb1, *tb2;
495 /* remove the TB from the hash list */
496 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
497 h = tb_phys_hash_func(phys_pc);
498 tb_remove(&tb_phys_hash[h], tb,
499 offsetof(TranslationBlock, phys_hash_next));
501 /* remove the TB from the page list */
502 if (tb->page_addr[0] != page_addr) {
503 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
504 tb_page_remove(&p->first_tb, tb);
505 invalidate_page_bitmap(p);
507 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
508 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
509 tb_page_remove(&p->first_tb, tb);
510 invalidate_page_bitmap(p);
513 tb_invalidated_flag = 1;
515 /* remove the TB from the hash list */
516 h = tb_jmp_cache_hash_func(tb->pc);
517 for(env = first_cpu; env != NULL; env = env->next_cpu) {
518 if (env->tb_jmp_cache[h] == tb)
519 env->tb_jmp_cache[h] = NULL;
522 /* suppress this TB from the two jump lists */
523 tb_jmp_remove(tb, 0);
524 tb_jmp_remove(tb, 1);
526 /* suppress any remaining jumps to this TB */
527 tb1 = tb->jmp_first;
528 for(;;) {
529 n1 = (long)tb1 & 3;
530 if (n1 == 2)
531 break;
532 tb1 = (TranslationBlock *)((long)tb1 & ~3);
533 tb2 = tb1->jmp_next[n1];
534 tb_reset_jump(tb1, n1);
535 tb1->jmp_next[n1] = NULL;
536 tb1 = tb2;
538 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
540 tb_phys_invalidate_count++;
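/* set bits [start, start + len) in the bitmap 'tab' */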
543 static inline void set_bits(uint8_t *tab, int start, int len)
545 int end, mask, end1;
547 end = start + len;
548 tab += start >> 3;
549 mask = 0xff << (start & 7);
550 if ((start & ~7) == (end & ~7)) {
551 if (start < end) {
552 mask &= ~(0xff << (end & 7));
553 *tab |= mask;
555 } else {
556 *tab++ |= mask;
557 start = (start + 8) & ~7;
558 end1 = end & ~7;
559 while (start < end1) {
560 *tab++ = 0xff;
561 start += 8;
563 if (start < end) {
564 mask = ~(0xff << (end & 7));
565 *tab |= mask;
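/* build the bitmap of bytes occupied by translated code in this page,
   used to filter self modifying code writes */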
570 static void build_page_bitmap(PageDesc *p)
572 int n, tb_start, tb_end;
573 TranslationBlock *tb;
575 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
576 if (!p->code_bitmap)
577 return;
578 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
580 tb = p->first_tb;
581 while (tb != NULL) {
582 n = (long)tb & 3;
583 tb = (TranslationBlock *)((long)tb & ~3);
584 /* NOTE: this is subtle as a TB may span two physical pages */
585 if (n == 0) {
586 /* NOTE: tb_end may be after the end of the page, but
587 it is not a problem */
588 tb_start = tb->pc & ~TARGET_PAGE_MASK;
589 tb_end = tb_start + tb->size;
590 if (tb_end > TARGET_PAGE_SIZE)
591 tb_end = TARGET_PAGE_SIZE;
592 } else {
593 tb_start = 0;
594 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
596 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
597 tb = tb->page_next[n];
601 #ifdef TARGET_HAS_PRECISE_SMC
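/* translate the code at 'pc' into a new TB and link it into the
   physical page tables (a TB may span two pages) */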
603 static void tb_gen_code(CPUState *env,
604 target_ulong pc, target_ulong cs_base, int flags,
605 int cflags)
607 TranslationBlock *tb;
608 uint8_t *tc_ptr;
609 target_ulong phys_pc, phys_page2, virt_page2;
610 int code_gen_size;
612 phys_pc = get_phys_addr_code(env, pc);
613 tb = tb_alloc(pc);
614 if (!tb) {
615 /* flush must be done */
616 tb_flush(env);
617 /* cannot fail at this point */
618 tb = tb_alloc(pc);
620 tc_ptr = code_gen_ptr;
621 tb->tc_ptr = tc_ptr;
622 tb->cs_base = cs_base;
623 tb->flags = flags;
624 tb->cflags = cflags;
625 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
626 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
628 /* check next page if needed */
629 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
630 phys_page2 = -1;
631 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
632 phys_page2 = get_phys_addr_code(env, virt_page2);
634 tb_link_phys(tb, phys_pc, phys_page2);
636 #endif
638 /* invalidate all TBs which intersect with the target physical page
 639 starting in the range [start, end). NOTE: start and end must refer to
640 the same physical page. 'is_cpu_write_access' should be true if called
641 from a real cpu write access: the virtual CPU will exit the current
642 TB if code is modified inside this TB. */
643 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
644 int is_cpu_write_access)
646 int n, current_tb_modified, current_tb_not_found, current_flags;
647 CPUState *env = cpu_single_env;
648 PageDesc *p;
649 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
650 target_ulong tb_start, tb_end;
651 target_ulong current_pc, current_cs_base;
653 p = page_find(start >> TARGET_PAGE_BITS);
654 if (!p)
655 return;
656 if (!p->code_bitmap &&
657 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
658 is_cpu_write_access) {
659 /* build code bitmap */
660 build_page_bitmap(p);
 664 /* we remove all the TBs in the range [start, end) */
664 /* XXX: see if in some cases it could be faster to invalidate all the code */
665 current_tb_not_found = is_cpu_write_access;
666 current_tb_modified = 0;
667 current_tb = NULL; /* avoid warning */
668 current_pc = 0; /* avoid warning */
669 current_cs_base = 0; /* avoid warning */
670 current_flags = 0; /* avoid warning */
671 tb = p->first_tb;
672 while (tb != NULL) {
673 n = (long)tb & 3;
674 tb = (TranslationBlock *)((long)tb & ~3);
675 tb_next = tb->page_next[n];
676 /* NOTE: this is subtle as a TB may span two physical pages */
677 if (n == 0) {
678 /* NOTE: tb_end may be after the end of the page, but
679 it is not a problem */
680 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
681 tb_end = tb_start + tb->size;
682 } else {
683 tb_start = tb->page_addr[1];
684 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
686 if (!(tb_end <= start || tb_start >= end)) {
687 #ifdef TARGET_HAS_PRECISE_SMC
688 if (current_tb_not_found) {
689 current_tb_not_found = 0;
690 current_tb = NULL;
691 if (env->mem_write_pc) {
692 /* now we have a real cpu fault */
693 current_tb = tb_find_pc(env->mem_write_pc);
696 if (current_tb == tb &&
697 !(current_tb->cflags & CF_SINGLE_INSN)) {
698 /* If we are modifying the current TB, we must stop
699 its execution. We could be more precise by checking
700 that the modification is after the current PC, but it
701 would require a specialized function to partially
702 restore the CPU state */
704 current_tb_modified = 1;
705 cpu_restore_state(current_tb, env,
706 env->mem_write_pc, NULL);
707 #if defined(TARGET_I386)
708 current_flags = env->hflags;
709 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
710 current_cs_base = (target_ulong)env->segs[R_CS].base;
711 current_pc = current_cs_base + env->eip;
712 #else
713 #error unsupported CPU
714 #endif
716 #endif /* TARGET_HAS_PRECISE_SMC */
717 /* we need to do that to handle the case where a signal
718 occurs while doing tb_phys_invalidate() */
719 saved_tb = NULL;
720 if (env) {
721 saved_tb = env->current_tb;
722 env->current_tb = NULL;
724 tb_phys_invalidate(tb, -1);
725 if (env) {
726 env->current_tb = saved_tb;
727 if (env->interrupt_request && env->current_tb)
728 cpu_interrupt(env, env->interrupt_request);
731 tb = tb_next;
733 #if !defined(CONFIG_USER_ONLY)
734 /* if no code remaining, no need to continue to use slow writes */
735 if (!p->first_tb) {
736 invalidate_page_bitmap(p);
737 if (is_cpu_write_access) {
738 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
741 #endif
742 #ifdef TARGET_HAS_PRECISE_SMC
743 if (current_tb_modified) {
744 /* we generate a block containing just the instruction
745 modifying the memory. It will ensure that it cannot modify
746 itself */
747 env->current_tb = NULL;
748 tb_gen_code(env, current_pc, current_cs_base, current_flags,
749 CF_SINGLE_INSN);
750 cpu_resume_from_signal(env, NULL);
752 #endif
755 /* len must be <= 8 and start must be a multiple of len */
756 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
758 PageDesc *p;
759 int offset, b;
760 #if 0
761 if (1) {
762 if (loglevel) {
763 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
764 cpu_single_env->mem_write_vaddr, len,
765 cpu_single_env->eip,
766 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
769 #endif
770 p = page_find(start >> TARGET_PAGE_BITS);
771 if (!p)
772 return;
773 if (p->code_bitmap) {
774 offset = start & ~TARGET_PAGE_MASK;
775 b = p->code_bitmap[offset >> 3] >> (offset & 7);
776 if (b & ((1 << len) - 1))
777 goto do_invalidate;
778 } else {
779 do_invalidate:
780 tb_invalidate_phys_page_range(start, start + len, 1);
784 #if !defined(CONFIG_SOFTMMU)
785 static void tb_invalidate_phys_page(target_ulong addr,
786 unsigned long pc, void *puc)
788 int n, current_flags, current_tb_modified;
789 target_ulong current_pc, current_cs_base;
790 PageDesc *p;
791 TranslationBlock *tb, *current_tb;
792 #ifdef TARGET_HAS_PRECISE_SMC
793 CPUState *env = cpu_single_env;
794 #endif
796 addr &= TARGET_PAGE_MASK;
797 p = page_find(addr >> TARGET_PAGE_BITS);
798 if (!p)
799 return;
800 tb = p->first_tb;
801 current_tb_modified = 0;
802 current_tb = NULL;
803 current_pc = 0; /* avoid warning */
804 current_cs_base = 0; /* avoid warning */
805 current_flags = 0; /* avoid warning */
806 #ifdef TARGET_HAS_PRECISE_SMC
807 if (tb && pc != 0) {
808 current_tb = tb_find_pc(pc);
810 #endif
811 while (tb != NULL) {
812 n = (long)tb & 3;
813 tb = (TranslationBlock *)((long)tb & ~3);
814 #ifdef TARGET_HAS_PRECISE_SMC
815 if (current_tb == tb &&
816 !(current_tb->cflags & CF_SINGLE_INSN)) {
817 /* If we are modifying the current TB, we must stop
818 its execution. We could be more precise by checking
819 that the modification is after the current PC, but it
820 would require a specialized function to partially
821 restore the CPU state */
823 current_tb_modified = 1;
824 cpu_restore_state(current_tb, env, pc, puc);
825 #if defined(TARGET_I386)
826 current_flags = env->hflags;
827 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
828 current_cs_base = (target_ulong)env->segs[R_CS].base;
829 current_pc = current_cs_base + env->eip;
830 #else
831 #error unsupported CPU
832 #endif
834 #endif /* TARGET_HAS_PRECISE_SMC */
835 tb_phys_invalidate(tb, addr);
836 tb = tb->page_next[n];
838 p->first_tb = NULL;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 if (current_tb_modified) {
841 /* we generate a block containing just the instruction
842 modifying the memory. It will ensure that it cannot modify
843 itself */
844 env->current_tb = NULL;
845 tb_gen_code(env, current_pc, current_cs_base, current_flags,
846 CF_SINGLE_INSN);
847 cpu_resume_from_signal(env, puc);
849 #endif
851 #endif
853 /* add the tb in the target page and protect it if necessary */
854 static inline void tb_alloc_page(TranslationBlock *tb,
855 unsigned int n, target_ulong page_addr)
857 PageDesc *p;
858 TranslationBlock *last_first_tb;
860 tb->page_addr[n] = page_addr;
861 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
862 tb->page_next[n] = p->first_tb;
863 last_first_tb = p->first_tb;
864 p->first_tb = (TranslationBlock *)((long)tb | n);
865 invalidate_page_bitmap(p);
867 #if defined(TARGET_HAS_SMC) || 1
869 #if defined(CONFIG_USER_ONLY)
870 if (p->flags & PAGE_WRITE) {
871 target_ulong addr;
872 PageDesc *p2;
873 int prot;
875 /* force the host page as non writable (writes will have a
876 page fault + mprotect overhead) */
877 page_addr &= qemu_host_page_mask;
878 prot = 0;
879 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
880 addr += TARGET_PAGE_SIZE) {
882 p2 = page_find (addr >> TARGET_PAGE_BITS);
883 if (!p2)
884 continue;
885 prot |= p2->flags;
886 p2->flags &= ~PAGE_WRITE;
887 page_get_flags(addr);
889 mprotect(g2h(page_addr), qemu_host_page_size,
890 (prot & PAGE_BITS) & ~PAGE_WRITE);
891 #ifdef DEBUG_TB_INVALIDATE
892 printf("protecting code page: 0x%08lx\n",
893 page_addr);
894 #endif
896 #else
897 /* if some code is already present, then the pages are already
898 protected. So we handle the case where only the first TB is
899 allocated in a physical page */
900 if (!last_first_tb) {
901 tlb_protect_code(page_addr);
903 #endif
905 #endif /* TARGET_HAS_SMC */
908 /* Allocate a new translation block. Flush the translation buffer if
909 too many translation blocks or too much generated code. */
910 TranslationBlock *tb_alloc(target_ulong pc)
912 TranslationBlock *tb;
914 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
915 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
916 return NULL;
917 tb = &tbs[nb_tbs++];
918 tb->pc = pc;
919 tb->cflags = 0;
920 return tb;
923 /* add a new TB and link it to the physical page tables. phys_page2 is
924 (-1) to indicate that only one page contains the TB. */
925 void tb_link_phys(TranslationBlock *tb,
926 target_ulong phys_pc, target_ulong phys_page2)
928 unsigned int h;
929 TranslationBlock **ptb;
931 /* add in the physical hash table */
932 h = tb_phys_hash_func(phys_pc);
933 ptb = &tb_phys_hash[h];
934 tb->phys_hash_next = *ptb;
935 *ptb = tb;
937 /* add in the page list */
938 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
939 if (phys_page2 != -1)
940 tb_alloc_page(tb, 1, phys_page2);
941 else
942 tb->page_addr[1] = -1;
944 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
945 tb->jmp_next[0] = NULL;
946 tb->jmp_next[1] = NULL;
947 #ifdef USE_CODE_COPY
948 tb->cflags &= ~CF_FP_USED;
949 if (tb->cflags & CF_TB_FP_USED)
950 tb->cflags |= CF_FP_USED;
951 #endif
953 /* init original jump addresses */
954 if (tb->tb_next_offset[0] != 0xffff)
955 tb_reset_jump(tb, 0);
956 if (tb->tb_next_offset[1] != 0xffff)
957 tb_reset_jump(tb, 1);
959 #ifdef DEBUG_TB_CHECK
960 tb_page_check();
961 #endif
964 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
965 tb[1].tc_ptr. Return NULL if not found */
966 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
968 int m_min, m_max, m;
969 unsigned long v;
970 TranslationBlock *tb;
972 if (nb_tbs <= 0)
973 return NULL;
974 if (tc_ptr < (unsigned long)code_gen_buffer ||
975 tc_ptr >= (unsigned long)code_gen_ptr)
976 return NULL;
977 /* binary search (cf Knuth) */
978 m_min = 0;
979 m_max = nb_tbs - 1;
980 while (m_min <= m_max) {
981 m = (m_min + m_max) >> 1;
982 tb = &tbs[m];
983 v = (unsigned long)tb->tc_ptr;
984 if (v == tc_ptr)
985 return tb;
986 else if (tc_ptr < v) {
987 m_max = m - 1;
988 } else {
989 m_min = m + 1;
992 return &tbs[m_max];
995 static void tb_reset_jump_recursive(TranslationBlock *tb);
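/* unchain the direct jump 'n' of 'tb': remove it from the target TB's
   incoming jump list, patch the generated code and recursively unchain
   the target */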
997 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
999 TranslationBlock *tb1, *tb_next, **ptb;
1000 unsigned int n1;
1002 tb1 = tb->jmp_next[n];
1003 if (tb1 != NULL) {
1004 /* find head of list */
1005 for(;;) {
1006 n1 = (long)tb1 & 3;
1007 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1008 if (n1 == 2)
1009 break;
1010 tb1 = tb1->jmp_next[n1];
1012 /* we are now sure that tb jumps to tb1 */
1013 tb_next = tb1;
1015 /* remove tb from the jmp_first list */
1016 ptb = &tb_next->jmp_first;
1017 for(;;) {
1018 tb1 = *ptb;
1019 n1 = (long)tb1 & 3;
1020 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1021 if (n1 == n && tb1 == tb)
1022 break;
1023 ptb = &tb1->jmp_next[n1];
1025 *ptb = tb->jmp_next[n];
1026 tb->jmp_next[n] = NULL;
1028 /* suppress the jump to next tb in generated code */
1029 tb_reset_jump(tb, n);
1031 /* suppress jumps in the tb on which we could have jumped */
1032 tb_reset_jump_recursive(tb_next);
1036 static void tb_reset_jump_recursive(TranslationBlock *tb)
1038 tb_reset_jump_recursive2(tb, 0);
1039 tb_reset_jump_recursive2(tb, 1);
1042 #if defined(TARGET_HAS_ICE)
1043 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1045 target_phys_addr_t addr;
1046 target_ulong pd;
1047 ram_addr_t ram_addr;
1048 PhysPageDesc *p;
1050 addr = cpu_get_phys_page_debug(env, pc);
1051 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1052 if (!p) {
1053 pd = IO_MEM_UNASSIGNED;
1054 } else {
1055 pd = p->phys_offset;
1057 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1058 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1060 #endif
1062 /* Add a watchpoint. */
1063 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1065 int i;
1067 for (i = 0; i < env->nb_watchpoints; i++) {
1068 if (addr == env->watchpoint[i].vaddr)
1069 return 0;
1071 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1072 return -1;
1074 i = env->nb_watchpoints++;
1075 env->watchpoint[i].vaddr = addr;
1076 tlb_flush_page(env, addr);
1077 /* FIXME: This flush is needed because of the hack to make memory ops
1078 terminate the TB. It can be removed once the proper IO trap and
1079 re-execute bits are in. */
1080 tb_flush(env);
1081 return i;
1084 /* Remove a watchpoint. */
1085 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1087 int i;
1089 for (i = 0; i < env->nb_watchpoints; i++) {
1090 if (addr == env->watchpoint[i].vaddr) {
1091 env->nb_watchpoints--;
1092 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1093 tlb_flush_page(env, addr);
1094 return 0;
1097 return -1;
1100 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1101 breakpoint is reached */
1102 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1104 #if defined(TARGET_HAS_ICE)
1105 int i;
1107 for(i = 0; i < env->nb_breakpoints; i++) {
1108 if (env->breakpoints[i] == pc)
1109 return 0;
1112 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1113 return -1;
1114 env->breakpoints[env->nb_breakpoints++] = pc;
1116 breakpoint_invalidate(env, pc);
1117 return 0;
1118 #else
1119 return -1;
1120 #endif
1123 /* remove a breakpoint */
1124 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1126 #if defined(TARGET_HAS_ICE)
1127 int i;
1128 for(i = 0; i < env->nb_breakpoints; i++) {
1129 if (env->breakpoints[i] == pc)
1130 goto found;
1132 return -1;
1133 found:
1134 env->nb_breakpoints--;
1135 if (i < env->nb_breakpoints)
1136 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1138 breakpoint_invalidate(env, pc);
1139 return 0;
1140 #else
1141 return -1;
1142 #endif
1145 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1146 CPU loop after each instruction */
1147 void cpu_single_step(CPUState *env, int enabled)
1149 #if defined(TARGET_HAS_ICE)
1150 if (env->singlestep_enabled != enabled) {
1151 env->singlestep_enabled = enabled;
1152 /* must flush all the translated code to avoid inconsistencies */
1153 /* XXX: only flush what is necessary */
1154 tb_flush(env);
1156 #endif
1159 /* enable or disable low levels log */
1160 void cpu_set_log(int log_flags)
1162 loglevel = log_flags;
1163 if (loglevel && !logfile) {
1164 logfile = fopen(logfilename, log_append ? "a" : "w");
1165 if (!logfile) {
1166 perror(logfilename);
1167 _exit(1);
1169 #if !defined(CONFIG_SOFTMMU)
1170 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1172 static uint8_t logfile_buf[4096];
1173 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1175 #else
1176 setvbuf(logfile, NULL, _IOLBF, 0);
1177 #endif
1178 log_append = 1;
1180 if (!loglevel && logfile) {
1181 fclose(logfile);
1182 logfile = NULL;
1186 void cpu_set_log_filename(const char *filename)
1188 logfilename = strdup(filename);
1189 if (logfile) {
1190 fclose(logfile);
1191 logfile = NULL;
1193 cpu_set_log(loglevel);
1196 /* mask must never be zero, except for A20 change call */
1197 void cpu_interrupt(CPUState *env, int mask)
1199 TranslationBlock *tb;
1200 static int interrupt_lock;
1202 env->interrupt_request |= mask;
1203 /* if the cpu is currently executing code, we must unlink it and
1204 all the potentially executing TB */
1205 tb = env->current_tb;
1206 if (tb && !testandset(&interrupt_lock)) {
1207 env->current_tb = NULL;
1208 tb_reset_jump_recursive(tb);
1209 interrupt_lock = 0;
1213 void cpu_reset_interrupt(CPUState *env, int mask)
1215 env->interrupt_request &= ~mask;
1218 CPULogItem cpu_log_items[] = {
1219 { CPU_LOG_TB_OUT_ASM, "out_asm",
1220 "show generated host assembly code for each compiled TB" },
1221 { CPU_LOG_TB_IN_ASM, "in_asm",
1222 "show target assembly code for each compiled TB" },
1223 { CPU_LOG_TB_OP, "op",
1224 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1225 #ifdef TARGET_I386
1226 { CPU_LOG_TB_OP_OPT, "op_opt",
1227 "show micro ops after optimization for each compiled TB" },
1228 #endif
1229 { CPU_LOG_INT, "int",
1230 "show interrupts/exceptions in short format" },
1231 { CPU_LOG_EXEC, "exec",
1232 "show trace before each executed TB (lots of logs)" },
1233 { CPU_LOG_TB_CPU, "cpu",
1234 "show CPU state before block translation" },
1235 #ifdef TARGET_I386
1236 { CPU_LOG_PCALL, "pcall",
1237 "show protected mode far calls/returns/exceptions" },
1238 #endif
1239 #ifdef DEBUG_IOPORT
1240 { CPU_LOG_IOPORT, "ioport",
1241 "show all i/o ports accesses" },
1242 #endif
1243 { 0, NULL, NULL },
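/* return true if the first 'n' characters of 's1' match the string 's2' exactly */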
1246 static int cmp1(const char *s1, int n, const char *s2)
1248 if (strlen(s2) != n)
1249 return 0;
1250 return memcmp(s1, s2, n) == 0;
1253 /* takes a comma separated list of log masks. Return 0 if error. */
1254 int cpu_str_to_log_mask(const char *str)
1256 CPULogItem *item;
1257 int mask;
1258 const char *p, *p1;
1260 p = str;
1261 mask = 0;
1262 for(;;) {
1263 p1 = strchr(p, ',');
1264 if (!p1)
1265 p1 = p + strlen(p);
1266 if(cmp1(p,p1-p,"all")) {
1267 for(item = cpu_log_items; item->mask != 0; item++) {
1268 mask |= item->mask;
1270 } else {
1271 for(item = cpu_log_items; item->mask != 0; item++) {
1272 if (cmp1(p, p1 - p, item->name))
1273 goto found;
1275 return 0;
1277 found:
1278 mask |= item->mask;
1279 if (*p1 != ',')
1280 break;
1281 p = p1 + 1;
1283 return mask;
1286 void cpu_abort(CPUState *env, const char *fmt, ...)
1288 va_list ap;
1290 va_start(ap, fmt);
1291 fprintf(stderr, "qemu: fatal: ");
1292 vfprintf(stderr, fmt, ap);
1293 fprintf(stderr, "\n");
1294 #ifdef TARGET_I386
1295 if(env->intercept & INTERCEPT_SVM_MASK) {
1296 /* most probably the virtual machine should not
1297 be shut down but rather caught by the VMM */
1298 vmexit(SVM_EXIT_SHUTDOWN, 0);
1300 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1301 #else
1302 cpu_dump_state(env, stderr, fprintf, 0);
1303 #endif
1304 va_end(ap);
1305 if (logfile) {
1306 fflush(logfile);
1307 fclose(logfile);
1309 abort();
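/* clone a CPU state, keeping the copy's own list link and cpu_index */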
1312 CPUState *cpu_copy(CPUState *env)
1314 CPUState *new_env = cpu_init();
1315 /* preserve chaining and index */
1316 CPUState *next_cpu = new_env->next_cpu;
1317 int cpu_index = new_env->cpu_index;
1318 memcpy(new_env, env, sizeof(CPUState));
1319 new_env->next_cpu = next_cpu;
1320 new_env->cpu_index = cpu_index;
1321 return new_env;
1324 #if !defined(CONFIG_USER_ONLY)
1326 /* NOTE: if flush_global is true, also flush global entries (not
1327 implemented yet) */
1328 void tlb_flush(CPUState *env, int flush_global)
1330 int i;
1332 #if defined(DEBUG_TLB)
1333 printf("tlb_flush:\n");
1334 #endif
1335 /* must reset current TB so that interrupts cannot modify the
1336 links while we are modifying them */
1337 env->current_tb = NULL;
1339 for(i = 0; i < CPU_TLB_SIZE; i++) {
1340 env->tlb_table[0][i].addr_read = -1;
1341 env->tlb_table[0][i].addr_write = -1;
1342 env->tlb_table[0][i].addr_code = -1;
1343 env->tlb_table[1][i].addr_read = -1;
1344 env->tlb_table[1][i].addr_write = -1;
1345 env->tlb_table[1][i].addr_code = -1;
1346 #if (NB_MMU_MODES >= 3)
1347 env->tlb_table[2][i].addr_read = -1;
1348 env->tlb_table[2][i].addr_write = -1;
1349 env->tlb_table[2][i].addr_code = -1;
1350 #if (NB_MMU_MODES == 4)
1351 env->tlb_table[3][i].addr_read = -1;
1352 env->tlb_table[3][i].addr_write = -1;
1353 env->tlb_table[3][i].addr_code = -1;
1354 #endif
1355 #endif
1358 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1360 #if !defined(CONFIG_SOFTMMU)
1361 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1362 #endif
1363 #ifdef USE_KQEMU
1364 if (env->kqemu_enabled) {
1365 kqemu_flush(env, flush_global);
1367 #endif
1368 tlb_flush_count++;
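/* invalidate a single TLB entry if its read, write or code address maps
   the page 'addr' */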
1371 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1373 if (addr == (tlb_entry->addr_read &
1374 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1375 addr == (tlb_entry->addr_write &
1376 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1377 addr == (tlb_entry->addr_code &
1378 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1379 tlb_entry->addr_read = -1;
1380 tlb_entry->addr_write = -1;
1381 tlb_entry->addr_code = -1;
1385 void tlb_flush_page(CPUState *env, target_ulong addr)
1387 int i;
1388 TranslationBlock *tb;
1390 #if defined(DEBUG_TLB)
1391 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1392 #endif
1393 /* must reset current TB so that interrupts cannot modify the
1394 links while we are modifying them */
1395 env->current_tb = NULL;
1397 addr &= TARGET_PAGE_MASK;
1398 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1399 tlb_flush_entry(&env->tlb_table[0][i], addr);
1400 tlb_flush_entry(&env->tlb_table[1][i], addr);
1401 #if (NB_MMU_MODES >= 3)
1402 tlb_flush_entry(&env->tlb_table[2][i], addr);
1403 #if (NB_MMU_MODES == 4)
1404 tlb_flush_entry(&env->tlb_table[3][i], addr);
1405 #endif
1406 #endif
1408 /* Discard jump cache entries for any tb which might potentially
1409 overlap the flushed page. */
1410 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1411 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1413 i = tb_jmp_cache_hash_page(addr);
1414 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1416 #if !defined(CONFIG_SOFTMMU)
1417 if (addr < MMAP_AREA_END)
1418 munmap((void *)addr, TARGET_PAGE_SIZE);
1419 #endif
1420 #ifdef USE_KQEMU
1421 if (env->kqemu_enabled) {
1422 kqemu_flush_page(env, addr);
1424 #endif
1427 /* update the TLBs so that writes to code in the virtual page 'addr'
1428 can be detected */
1429 static void tlb_protect_code(ram_addr_t ram_addr)
1431 cpu_physical_memory_reset_dirty(ram_addr,
1432 ram_addr + TARGET_PAGE_SIZE,
1433 CODE_DIRTY_FLAG);
1436 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1437 tested for self modifying code */
1438 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1439 target_ulong vaddr)
1441 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
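/* make RAM write entries inside [start, start + length) go through the
   NOTDIRTY handler so that the dirty bits are set again on the next store */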
1444 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1445 unsigned long start, unsigned long length)
1447 unsigned long addr;
1448 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1449 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1450 if ((addr - start) < length) {
1451 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1456 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1457 int dirty_flags)
1459 CPUState *env;
1460 unsigned long length, start1;
1461 int i, mask, len;
1462 uint8_t *p;
1464 start &= TARGET_PAGE_MASK;
1465 end = TARGET_PAGE_ALIGN(end);
1467 length = end - start;
1468 if (length == 0)
1469 return;
1470 len = length >> TARGET_PAGE_BITS;
1471 #ifdef USE_KQEMU
1472 /* XXX: should not depend on cpu context */
1473 env = first_cpu;
1474 if (env->kqemu_enabled) {
1475 ram_addr_t addr;
1476 addr = start;
1477 for(i = 0; i < len; i++) {
1478 kqemu_set_notdirty(env, addr);
1479 addr += TARGET_PAGE_SIZE;
1482 #endif
1483 mask = ~dirty_flags;
1484 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1485 for(i = 0; i < len; i++)
1486 p[i] &= mask;
1488 /* we modify the TLB cache so that the dirty bit will be set again
1489 when accessing the range */
1490 start1 = start + (unsigned long)phys_ram_base;
1491 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1492 for(i = 0; i < CPU_TLB_SIZE; i++)
1493 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1494 for(i = 0; i < CPU_TLB_SIZE; i++)
1495 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1496 #if (NB_MMU_MODES >= 3)
1497 for(i = 0; i < CPU_TLB_SIZE; i++)
1498 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1499 #if (NB_MMU_MODES == 4)
1500 for(i = 0; i < CPU_TLB_SIZE; i++)
1501 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1502 #endif
1503 #endif
1506 #if !defined(CONFIG_SOFTMMU)
1507 /* XXX: this is expensive */
1509 VirtPageDesc *p;
1510 int j;
1511 target_ulong addr;
1513 for(i = 0; i < L1_SIZE; i++) {
1514 p = l1_virt_map[i];
1515 if (p) {
1516 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1517 for(j = 0; j < L2_SIZE; j++) {
1518 if (p->valid_tag == virt_valid_tag &&
1519 p->phys_addr >= start && p->phys_addr < end &&
1520 (p->prot & PROT_WRITE)) {
1521 if (addr < MMAP_AREA_END) {
1522 mprotect((void *)addr, TARGET_PAGE_SIZE,
1523 p->prot & ~PROT_WRITE);
1526 addr += TARGET_PAGE_SIZE;
1527 p++;
1532 #endif
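/* route writes through the NOTDIRTY handler again if the backing RAM
   page has been cleaned */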
1535 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1537 ram_addr_t ram_addr;
1539 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1540 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1541 tlb_entry->addend - (unsigned long)phys_ram_base;
1542 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1543 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1548 /* update the TLB according to the current state of the dirty bits */
1549 void cpu_tlb_update_dirty(CPUState *env)
1551 int i;
1552 for(i = 0; i < CPU_TLB_SIZE; i++)
1553 tlb_update_dirty(&env->tlb_table[0][i]);
1554 for(i = 0; i < CPU_TLB_SIZE; i++)
1555 tlb_update_dirty(&env->tlb_table[1][i]);
1556 #if (NB_MMU_MODES >= 3)
1557 for(i = 0; i < CPU_TLB_SIZE; i++)
1558 tlb_update_dirty(&env->tlb_table[2][i]);
1559 #if (NB_MMU_MODES == 4)
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_update_dirty(&env->tlb_table[3][i]);
1562 #endif
1563 #endif
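/* switch a NOTDIRTY write entry back to direct RAM access once its page
   has been marked dirty again */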
1566 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1567 unsigned long start)
1569 unsigned long addr;
1570 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1571 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1572 if (addr == start) {
1573 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1578 /* update the TLB corresponding to virtual page vaddr and phys addr
1579 addr so that it is no longer dirty */
1580 static inline void tlb_set_dirty(CPUState *env,
1581 unsigned long addr, target_ulong vaddr)
1583 int i;
1585 addr &= TARGET_PAGE_MASK;
1586 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1587 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1588 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1589 #if (NB_MMU_MODES >= 3)
1590 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1591 #if (NB_MMU_MODES == 4)
1592 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1593 #endif
1594 #endif
1597 /* add a new TLB entry. At most one entry for a given virtual address
1598 is permitted. Return 0 if OK or 2 if the page could not be mapped
1599 (can only happen in non SOFTMMU mode for I/O pages or pages
1600 conflicting with the host address space). */
1601 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1602 target_phys_addr_t paddr, int prot,
1603 int is_user, int is_softmmu)
1605 PhysPageDesc *p;
1606 unsigned long pd;
1607 unsigned int index;
1608 target_ulong address;
1609 target_phys_addr_t addend;
1610 int ret;
1611 CPUTLBEntry *te;
1612 int i;
1614 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1615 if (!p) {
1616 pd = IO_MEM_UNASSIGNED;
1617 } else {
1618 pd = p->phys_offset;
1620 #if defined(DEBUG_TLB)
1621 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1622 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1623 #endif
1625 ret = 0;
1626 #if !defined(CONFIG_SOFTMMU)
1627 if (is_softmmu)
1628 #endif
1630 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1631 /* IO memory case */
1632 address = vaddr | pd;
1633 addend = paddr;
1634 } else {
1635 /* standard memory */
1636 address = vaddr;
1637 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1640 /* Make accesses to pages with watchpoints go via the
1641 watchpoint trap routines. */
1642 for (i = 0; i < env->nb_watchpoints; i++) {
1643 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1644 if (address & ~TARGET_PAGE_MASK) {
1645 env->watchpoint[i].addend = 0;
1646 address = vaddr | io_mem_watch;
1647 } else {
1648 env->watchpoint[i].addend = pd - paddr +
1649 (unsigned long) phys_ram_base;
1650 /* TODO: Figure out how to make read watchpoints coexist
1651 with code. */
1652 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1657 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1658 addend -= vaddr;
1659 te = &env->tlb_table[is_user][index];
1660 te->addend = addend;
1661 if (prot & PAGE_READ) {
1662 te->addr_read = address;
1663 } else {
1664 te->addr_read = -1;
1666 if (prot & PAGE_EXEC) {
1667 te->addr_code = address;
1668 } else {
1669 te->addr_code = -1;
1671 if (prot & PAGE_WRITE) {
1672 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1673 (pd & IO_MEM_ROMD)) {
1674 /* write access calls the I/O callback */
1675 te->addr_write = vaddr |
1676 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1677 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1678 !cpu_physical_memory_is_dirty(pd)) {
1679 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1680 } else {
1681 te->addr_write = address;
1683 } else {
1684 te->addr_write = -1;
1687 #if !defined(CONFIG_SOFTMMU)
1688 else {
1689 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1690 /* IO access: no mapping is done as it will be handled by the
1691 soft MMU */
1692 if (!(env->hflags & HF_SOFTMMU_MASK))
1693 ret = 2;
1694 } else {
1695 void *map_addr;
1697 if (vaddr >= MMAP_AREA_END) {
1698 ret = 2;
1699 } else {
1700 if (prot & PROT_WRITE) {
1701 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1702 #if defined(TARGET_HAS_SMC) || 1
1703 first_tb ||
1704 #endif
1705 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1706 !cpu_physical_memory_is_dirty(pd))) {
1707 /* ROM: we do as if code was inside */
1708 /* if code is present, we only map as read only and save the
1709 original mapping */
1710 VirtPageDesc *vp;
1712 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1713 vp->phys_addr = pd;
1714 vp->prot = prot;
1715 vp->valid_tag = virt_valid_tag;
1716 prot &= ~PAGE_WRITE;
1719 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1720 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1721 if (map_addr == MAP_FAILED) {
1722 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1723 paddr, vaddr);
1728 #endif
1729 return ret;
1732 /* called from signal handler: invalidate the code and unprotect the
1733 page. Return TRUE if the fault was successfully handled. */
1734 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1736 #if !defined(CONFIG_SOFTMMU)
1737 VirtPageDesc *vp;
1739 #if defined(DEBUG_TLB)
1740 printf("page_unprotect: addr=0x%08x\n", addr);
1741 #endif
1742 addr &= TARGET_PAGE_MASK;
1744 /* if it is not mapped, no need to worry here */
1745 if (addr >= MMAP_AREA_END)
1746 return 0;
1747 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1748 if (!vp)
1749 return 0;
1750 /* NOTE: in this case, validate_tag is _not_ tested as it
1751 validates only the code TLB */
1752 if (vp->valid_tag != virt_valid_tag)
1753 return 0;
1754 if (!(vp->prot & PAGE_WRITE))
1755 return 0;
1756 #if defined(DEBUG_TLB)
1757 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1758 addr, vp->phys_addr, vp->prot);
1759 #endif
1760 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1761 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1762 (unsigned long)addr, vp->prot);
1763 /* set the dirty bit */
1764 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1765 /* flush the code inside */
1766 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1767 return 1;
1768 #else
1769 return 0;
1770 #endif
1773 #else
1775 void tlb_flush(CPUState *env, int flush_global)
1779 void tlb_flush_page(CPUState *env, target_ulong addr)
1783 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1784 target_phys_addr_t paddr, int prot,
1785 int is_user, int is_softmmu)
1787 return 0;
1790 /* dump memory mappings */
1791 void page_dump(FILE *f)
1793 unsigned long start, end;
1794 int i, j, prot, prot1;
1795 PageDesc *p;
1797 fprintf(f, "%-8s %-8s %-8s %s\n",
1798 "start", "end", "size", "prot");
1799 start = -1;
1800 end = -1;
1801 prot = 0;
1802 for(i = 0; i <= L1_SIZE; i++) {
1803 if (i < L1_SIZE)
1804 p = l1_map[i];
1805 else
1806 p = NULL;
1807 for(j = 0;j < L2_SIZE; j++) {
1808 if (!p)
1809 prot1 = 0;
1810 else
1811 prot1 = p[j].flags;
1812 if (prot1 != prot) {
1813 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1814 if (start != -1) {
1815 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1816 start, end, end - start,
1817 prot & PAGE_READ ? 'r' : '-',
1818 prot & PAGE_WRITE ? 'w' : '-',
1819 prot & PAGE_EXEC ? 'x' : '-');
1821 if (prot1 != 0)
1822 start = end;
1823 else
1824 start = -1;
1825 prot = prot1;
1827 if (!p)
1828 break;
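/* return the protection flags of a guest page, or 0 if it is not mapped */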
1833 int page_get_flags(target_ulong address)
1835 PageDesc *p;
1837 p = page_find(address >> TARGET_PAGE_BITS);
1838 if (!p)
1839 return 0;
1840 return p->flags;
1843 /* modify the flags of a page and invalidate the code if
1844 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1845 depending on PAGE_WRITE */
1846 void page_set_flags(target_ulong start, target_ulong end, int flags)
1848 PageDesc *p;
1849 target_ulong addr;
1851 start = start & TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1853 if (flags & PAGE_WRITE)
1854 flags |= PAGE_WRITE_ORG;
1855 spin_lock(&tb_lock);
1856 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1857 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1858 /* if the write protection is set, then we invalidate the code
1859 inside */
1860 if (!(p->flags & PAGE_WRITE) &&
1861 (flags & PAGE_WRITE) &&
1862 p->first_tb) {
1863 tb_invalidate_phys_page(addr, 0, NULL);
1865 p->flags = flags;
1867 spin_unlock(&tb_lock);
1870 /* called from signal handler: invalidate the code and unprotect the
1871 page. Return TRUE if the fault was successfully handled. */
1872 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1874 unsigned int page_index, prot, pindex;
1875 PageDesc *p, *p1;
1876 target_ulong host_start, host_end, addr;
1878 host_start = address & qemu_host_page_mask;
1879 page_index = host_start >> TARGET_PAGE_BITS;
1880 p1 = page_find(page_index);
1881 if (!p1)
1882 return 0;
1883 host_end = host_start + qemu_host_page_size;
1884 p = p1;
1885 prot = 0;
1886 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1887 prot |= p->flags;
1888 p++;
1890 /* if the page was really writable, then we change its
1891 protection back to writable */
1892 if (prot & PAGE_WRITE_ORG) {
1893 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1894 if (!(p1[pindex].flags & PAGE_WRITE)) {
1895 mprotect((void *)g2h(host_start), qemu_host_page_size,
1896 (prot & PAGE_BITS) | PAGE_WRITE);
1897 p1[pindex].flags |= PAGE_WRITE;
1898 /* and since the content will be modified, we must invalidate
1899 the corresponding translated code. */
1900 tb_invalidate_phys_page(address, pc, puc);
1901 #ifdef DEBUG_TB_CHECK
1902 tb_invalidate_check(address);
1903 #endif
1904 return 1;
1907 return 0;
1910 /* call this function when system calls directly modify a memory area */
1911 /* ??? This should be redundant now we have lock_user. */
1912 void page_unprotect_range(target_ulong data, target_ulong data_size)
1914 target_ulong start, end, addr;
1916 start = data;
1917 end = start + data_size;
1918 start &= TARGET_PAGE_MASK;
1919 end = TARGET_PAGE_ALIGN(end);
1920 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1921 page_unprotect(addr, 0, NULL);
1925 static inline void tlb_set_dirty(CPUState *env,
1926 unsigned long addr, target_ulong vaddr)
1929 #endif /* defined(CONFIG_USER_ONLY) */
1931 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1932 int memory);
1933 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1934 int orig_memory);
1935 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1936 need_subpage) \
1937 do { \
1938 if (addr > start_addr) \
1939 start_addr2 = 0; \
1940 else { \
1941 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1942 if (start_addr2 > 0) \
1943 need_subpage = 1; \
1946 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1947 end_addr2 = TARGET_PAGE_SIZE - 1; \
1948 else { \
1949 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1950 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1951 need_subpage = 1; \
1953 } while (0)
1955 /* register physical memory. 'size' must be a multiple of the target
1956 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1957 io memory page */
1958 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1959 unsigned long size,
1960 unsigned long phys_offset)
1962 target_phys_addr_t addr, end_addr;
1963 PhysPageDesc *p;
1964 CPUState *env;
1965 unsigned long orig_size = size;
1966 void *subpage;
1968 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1969 end_addr = start_addr + (target_phys_addr_t)size;
1970 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1971 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1972 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
1973 unsigned long orig_memory = p->phys_offset;
1974 target_phys_addr_t start_addr2, end_addr2;
1975 int need_subpage = 0;
1977 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
1978 need_subpage);
1979 if (need_subpage) {
1980 if (!(orig_memory & IO_MEM_SUBPAGE)) {
1981 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1982 &p->phys_offset, orig_memory);
1983 } else {
1984 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
1985 >> IO_MEM_SHIFT];
1987 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
1988 } else {
1989 p->phys_offset = phys_offset;
1990 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1991 (phys_offset & IO_MEM_ROMD))
1992 phys_offset += TARGET_PAGE_SIZE;
1994 } else {
1995 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1996 p->phys_offset = phys_offset;
1997 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1998 (phys_offset & IO_MEM_ROMD))
1999 phys_offset += TARGET_PAGE_SIZE;
2000 else {
2001 target_phys_addr_t start_addr2, end_addr2;
2002 int need_subpage = 0;
2004 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2005 end_addr2, need_subpage);
2007 if (need_subpage) {
2008 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2009 &p->phys_offset, IO_MEM_UNASSIGNED);
2010 subpage_register(subpage, start_addr2, end_addr2,
2011 phys_offset);
2017 /* since each CPU stores ram addresses in its TLB cache, we must
2018 reset the modified entries */
2019 /* XXX: slow ! */
2020 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2021 tlb_flush(env, 1);
2025 /* XXX: temporary until new memory mapping API */
2026 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2028 PhysPageDesc *p;
2030 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2031 if (!p)
2032 return IO_MEM_UNASSIGNED;
2033 return p->phys_offset;
2036 /* XXX: better than nothing */
2037 ram_addr_t qemu_ram_alloc(unsigned int size)
2039 ram_addr_t addr;
2040 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2041 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2042 size, phys_ram_size);
2043 abort();
2045 addr = phys_ram_alloc_offset;
2046 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2047 return addr;
2050 void qemu_ram_free(ram_addr_t addr)
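/* handlers for accesses to unassigned physical memory: optionally logged,
   they fault on SPARC; otherwise reads return 0 and writes are ignored */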
2054 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2056 #ifdef DEBUG_UNASSIGNED
2057 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
2058 #endif
2059 #ifdef TARGET_SPARC
2060 do_unassigned_access(addr, 0, 0, 0);
2061 #endif
2062 return 0;
2065 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2067 #ifdef DEBUG_UNASSIGNED
2068 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
2069 #endif
2070 #ifdef TARGET_SPARC
2071 do_unassigned_access(addr, 1, 0, 0);
2072 #endif
2075 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2076 unassigned_mem_readb,
2077 unassigned_mem_readb,
2078 unassigned_mem_readb,
2081 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2082 unassigned_mem_writeb,
2083 unassigned_mem_writeb,
2084 unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
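
/* Illustrative sketch, not compiled: the notdirty handlers above OR
 * (0xff & ~CODE_DIRTY_FLAG) into the per-page byte in phys_ram_dirty, so a
 * client that wants to know whether a RAM page has been written since its
 * flags were cleared can test those bits.  The helper below is hypothetical;
 * compare cpu_physical_memory_is_dirty() used later in this file. */
#if 0
static int example_page_was_written(ram_addr_t ram_addr)
{
    /* any dirty bit other than CODE_DIRTY_FLAG means a guest write happened */
    return (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]
            & ~CODE_DIRTY_FLAG) != 0;
}
#endif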
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
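
/* Illustrative sketch, not compiled: despite the "multiple of the target
 * page size" note above cpu_register_physical_memory(), the CHECK_SUBPAGE
 * path appears intended to let a caller register a range that does not
 * cover whole pages; subpage_init()/subpage_register() then dispatch the
 * partial page at sub-page granularity.  The address, size and example_io
 * value (as returned by cpu_register_io_memory()) are hypothetical. */
#if 0
static void example_map_small_mmio(int example_io)
{
    /* only 0x100 bytes: the enclosing page becomes an IO_MEM_SUBPAGE page */
    cpu_register_physical_memory(0x10001000, 0x100, example_io);
}
#endif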
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding I/O zone is modified; if it is zero or negative, a new
   I/O zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
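
/* Illustrative sketch, not compiled: the usual device-side pattern around
 * cpu_register_io_memory().  The example_* names, the register layout and
 * the physical address are hypothetical; only the two registration calls
 * refer to functions defined in this file. */
#if 0
typedef struct {
    uint32_t ctrl;
} example_state;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    example_state *s = opaque;
    return s->ctrl;                 /* every offset maps to one register here */
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    example_state *s = opaque;
    s->ctrl = val;
}

static CPUReadMemoryFunc *example_read[3] = {
    example_readl, example_readl, example_readl,    /* byte, word, dword */
};

static CPUWriteMemoryFunc *example_write[3] = {
    example_writel, example_writel, example_writel,
};

static void example_init(example_state *s)
{
    int io;

    /* io_index 0 requests a fresh slot; the return value is already shifted
       by IO_MEM_SHIFT and can be used directly as a phys_offset */
    io = cpu_register_io_memory(0, example_read, example_write, s);
    cpu_register_physical_memory(0x10002000, TARGET_PAGE_SIZE, io);
}
#endif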
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only touch the part of the buffer that lies in this page */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
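
/* Illustrative sketch, not compiled: most callers go through the
 * cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
 * cpu_physical_memory_rw() (as ldub_phys() and stb_phys() below do).  The
 * helper name and the 16-byte descriptor are hypothetical. */
#if 0
static void example_patch_descriptor(target_phys_addr_t guest_pa)
{
    uint8_t desc[16];

    /* reads and writes may cross page boundaries; the loop above splits them */
    cpu_physical_memory_read(guest_pa, desc, sizeof(desc));
    desc[0] |= 0x01;    /* hypothetical "valid" flag */
    cpu_physical_memory_write(guest_pa, desc, sizeof(desc));
}
#endif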
/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
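
/* Illustrative sketch, not compiled: stl_phys_notdirty() is meant for
 * updates such as setting accessed/dirty bits in guest page-table entries
 * from MMU helper code, where the write must not perturb the dirty bitmap
 * or invalidate translated code.  The PTE bit and helper are hypothetical. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20   /* hypothetical accessed bit */

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        pte |= EXAMPLE_PTE_ACCESSED;
        /* write back without touching phys_ram_dirty or invalidating TBs */
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif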
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
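
/* Illustrative sketch, not compiled: a debugger front end (a gdb stub, for
 * instance) would be expected to read guest virtual memory through
 * cpu_memory_rw_debug(); an unmapped page in the range shows up as -1.
 * The helper name is hypothetical. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* is_write == 0: copy guest memory into buf */
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) < 0)
        return -1;      /* some page in the range was not mapped */
    return len;
}
#endif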
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif