[qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <stdarg.h>
24 #include <string.h>
25 #include <errno.h>
26 #include <unistd.h>
27 #include <inttypes.h>
28 #if !defined(CONFIG_SOFTMMU)
29 #include <sys/mman.h>
30 #endif
32 #include "cpu.h"
33 #include "exec-all.h"
35 //#define DEBUG_TB_INVALIDATE
36 //#define DEBUG_FLUSH
37 //#define DEBUG_TLB
39 /* make various TB consistency checks */
40 //#define DEBUG_TB_CHECK
41 //#define DEBUG_TLB_CHECK
43 /* threshold to flush the translated code buffer */
44 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
46 #define SMC_BITMAP_USE_THRESHOLD 10
48 #define MMAP_AREA_START 0x00000000
49 #define MMAP_AREA_END 0xa8000000
51 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
53 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
54 int nb_tbs;
55 /* any access to the tbs or the page table must use this lock */
56 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
58 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59 uint8_t *code_gen_ptr;
61 int phys_ram_size;
62 int phys_ram_fd;
63 uint8_t *phys_ram_base;
64 uint8_t *phys_ram_dirty;
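/* phys_ram_dirty holds one flag byte per target page of guest RAM; it is
   set by every write that goes through the slow paths below and cleared
   again by cpu_physical_memory_reset_dirty(), so clients can track which
   pages have been modified. */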
66 typedef struct PageDesc {
67 /* list of TBs intersecting this ram page */
68 TranslationBlock *first_tb;
69 /* in order to optimize self modifying code, we count the number
70 of lookups we do to a given page to use a bitmap */
71 unsigned int code_write_count;
72 uint8_t *code_bitmap;
73 #if defined(CONFIG_USER_ONLY)
74 unsigned long flags;
75 #endif
76 } PageDesc;
78 typedef struct PhysPageDesc {
79 /* offset in host memory of the page + io_index in the low 12 bits */
80 unsigned long phys_offset;
81 } PhysPageDesc;
83 typedef struct VirtPageDesc {
84 /* physical address of code page. It is valid only if 'valid_tag'
85 matches 'virt_valid_tag' */
86 target_ulong phys_addr;
87 unsigned int valid_tag;
88 #if !defined(CONFIG_SOFTMMU)
89 /* original page access rights. It is valid only if 'valid_tag'
90 matches 'virt_valid_tag' */
91 unsigned int prot;
92 #endif
93 } VirtPageDesc;
95 #define L2_BITS 10
96 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
98 #define L1_SIZE (1 << L1_BITS)
99 #define L2_SIZE (1 << L2_BITS)
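/* Two-level lookup tables: a target page index (address >> TARGET_PAGE_BITS)
   is split into an L1 part (top L1_BITS bits, indexing l1_map / l1_phys_map /
   l1_virt_map) and an L2 part (low L2_BITS bits, indexing a lazily allocated
   second-level array of L2_SIZE descriptors). With 4 KB target pages this
   gives a 10/10/12 split of a 32-bit address. */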
101 static void io_mem_init(void);
103 unsigned long real_host_page_size;
104 unsigned long host_page_bits;
105 unsigned long host_page_size;
106 unsigned long host_page_mask;
108 /* XXX: for system emulation, it could just be an array */
109 static PageDesc *l1_map[L1_SIZE];
110 static PhysPageDesc *l1_phys_map[L1_SIZE];
112 #if !defined(CONFIG_USER_ONLY)
113 static VirtPageDesc *l1_virt_map[L1_SIZE];
114 static unsigned int virt_valid_tag;
115 #endif
117 /* io memory support */
118 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
119 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
120 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
121 static int io_mem_nb;
123 /* log support */
124 char *logfilename = "/tmp/qemu.log";
125 FILE *logfile;
126 int loglevel;
128 static void page_init(void)
130 /* NOTE: we can always suppose that host_page_size >=
131 TARGET_PAGE_SIZE */
132 #ifdef _WIN32
133 real_host_page_size = 4096;
134 #else
135 real_host_page_size = getpagesize();
136 #endif
137 if (host_page_size == 0)
138 host_page_size = real_host_page_size;
139 if (host_page_size < TARGET_PAGE_SIZE)
140 host_page_size = TARGET_PAGE_SIZE;
141 host_page_bits = 0;
142 while ((1 << host_page_bits) < host_page_size)
143 host_page_bits++;
144 host_page_mask = ~(host_page_size - 1);
145 #if !defined(CONFIG_USER_ONLY)
146 virt_valid_tag = 1;
147 #endif
150 static inline PageDesc *page_find_alloc(unsigned int index)
152 PageDesc **lp, *p;
154 lp = &l1_map[index >> L2_BITS];
155 p = *lp;
156 if (!p) {
157 /* allocate if not found */
158 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
159 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
160 *lp = p;
162 return p + (index & (L2_SIZE - 1));
165 static inline PageDesc *page_find(unsigned int index)
167 PageDesc *p;
169 p = l1_map[index >> L2_BITS];
170 if (!p)
171 return 0;
172 return p + (index & (L2_SIZE - 1));
175 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
177 PhysPageDesc **lp, *p;
179 lp = &l1_phys_map[index >> L2_BITS];
180 p = *lp;
181 if (!p) {
182 /* allocate if not found */
183 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
184 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
185 *lp = p;
187 return p + (index & (L2_SIZE - 1));
190 static inline PhysPageDesc *phys_page_find(unsigned int index)
192 PhysPageDesc *p;
194 p = l1_phys_map[index >> L2_BITS];
195 if (!p)
196 return 0;
197 return p + (index & (L2_SIZE - 1));
200 #if !defined(CONFIG_USER_ONLY)
201 static void tlb_protect_code(CPUState *env, target_ulong addr);
202 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
204 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
206 VirtPageDesc **lp, *p;
208 lp = &l1_virt_map[index >> L2_BITS];
209 p = *lp;
210 if (!p) {
211 /* allocate if not found */
212 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
213 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
214 *lp = p;
216 return p + (index & (L2_SIZE - 1));
219 static inline VirtPageDesc *virt_page_find(unsigned int index)
221 VirtPageDesc *p;
223 p = l1_virt_map[index >> L2_BITS];
224 if (!p)
225 return 0;
226 return p + (index & (L2_SIZE - 1));
229 static void virt_page_flush(void)
231 int i, j;
232 VirtPageDesc *p;
234 virt_valid_tag++;
236 if (virt_valid_tag == 0) {
237 virt_valid_tag = 1;
238 for(i = 0; i < L1_SIZE; i++) {
239 p = l1_virt_map[i];
240 if (p) {
241 for(j = 0; j < L2_SIZE; j++)
242 p[j].valid_tag = 0;
247 #else
248 static void virt_page_flush(void)
251 #endif
253 void cpu_exec_init(void)
255 if (!code_gen_ptr) {
256 code_gen_ptr = code_gen_buffer;
257 page_init();
258 io_mem_init();
262 static inline void invalidate_page_bitmap(PageDesc *p)
264 if (p->code_bitmap) {
265 qemu_free(p->code_bitmap);
266 p->code_bitmap = NULL;
268 p->code_write_count = 0;
271 /* set to NULL all the 'first_tb' fields in all PageDescs */
272 static void page_flush_tb(void)
274 int i, j;
275 PageDesc *p;
277 for(i = 0; i < L1_SIZE; i++) {
278 p = l1_map[i];
279 if (p) {
280 for(j = 0; j < L2_SIZE; j++) {
281 p->first_tb = NULL;
282 invalidate_page_bitmap(p);
283 p++;
289 /* flush all the translation blocks */
290 /* XXX: tb_flush is currently not thread safe */
291 void tb_flush(CPUState *env)
293 int i;
294 #if defined(DEBUG_FLUSH)
295 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
296 code_gen_ptr - code_gen_buffer,
297 nb_tbs,
298 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
299 #endif
300 nb_tbs = 0;
301 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
302 tb_hash[i] = NULL;
303 virt_page_flush();
305 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
306 tb_phys_hash[i] = NULL;
307 page_flush_tb();
309 code_gen_ptr = code_gen_buffer;
310 /* XXX: flush processor icache at this point if cache flush is
311 expensive */
314 #ifdef DEBUG_TB_CHECK
316 static void tb_invalidate_check(unsigned long address)
318 TranslationBlock *tb;
319 int i;
320 address &= TARGET_PAGE_MASK;
321 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
322 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
323 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
324 address >= tb->pc + tb->size)) {
325 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
326 address, tb->pc, tb->size);
332 /* verify that all the pages have correct rights for code */
333 static void tb_page_check(void)
335 TranslationBlock *tb;
336 int i, flags1, flags2;
338 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
339 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
340 flags1 = page_get_flags(tb->pc);
341 flags2 = page_get_flags(tb->pc + tb->size - 1);
342 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
343 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
344 tb->pc, tb->size, flags1, flags2);
350 void tb_jmp_check(TranslationBlock *tb)
352 TranslationBlock *tb1;
353 unsigned int n1;
355 /* suppress any remaining jumps to this TB */
356 tb1 = tb->jmp_first;
357 for(;;) {
358 n1 = (long)tb1 & 3;
359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
360 if (n1 == 2)
361 break;
362 tb1 = tb1->jmp_next[n1];
364 /* check end of list */
365 if (tb1 != tb) {
366 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
370 #endif
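/* The TB lists below store a small tag in the two low bits of each pointer:
   in page_next[] and jmp_next[] the tag (0 or 1) says which of the TB's two
   pages, or which of its two direct jumps, the link belongs to, and the value
   2 marks the end of the circular jump list (jmp_first points back at the TB
   itself with tag 2). Hence the recurring "(long)tb & 3" / "& ~3" idiom. */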
372 /* invalidate one TB */
373 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
374 int next_offset)
376 TranslationBlock *tb1;
377 for(;;) {
378 tb1 = *ptb;
379 if (tb1 == tb) {
380 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
381 break;
383 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
387 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
389 TranslationBlock *tb1;
390 unsigned int n1;
392 for(;;) {
393 tb1 = *ptb;
394 n1 = (long)tb1 & 3;
395 tb1 = (TranslationBlock *)((long)tb1 & ~3);
396 if (tb1 == tb) {
397 *ptb = tb1->page_next[n1];
398 break;
400 ptb = &tb1->page_next[n1];
404 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
406 TranslationBlock *tb1, **ptb;
407 unsigned int n1;
409 ptb = &tb->jmp_next[n];
410 tb1 = *ptb;
411 if (tb1) {
412 /* find tb(n) in circular list */
413 for(;;) {
414 tb1 = *ptb;
415 n1 = (long)tb1 & 3;
416 tb1 = (TranslationBlock *)((long)tb1 & ~3);
417 if (n1 == n && tb1 == tb)
418 break;
419 if (n1 == 2) {
420 ptb = &tb1->jmp_first;
421 } else {
422 ptb = &tb1->jmp_next[n1];
425 /* now we can suppress tb(n) from the list */
426 *ptb = tb->jmp_next[n];
428 tb->jmp_next[n] = NULL;
432 /* reset the jump entry 'n' of a TB so that it is not chained to
433 another TB */
434 static inline void tb_reset_jump(TranslationBlock *tb, int n)
436 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
439 static inline void tb_invalidate(TranslationBlock *tb)
441 unsigned int h, n1;
442 TranslationBlock *tb1, *tb2, **ptb;
444 tb_invalidated_flag = 1;
446 /* remove the TB from the hash list */
447 h = tb_hash_func(tb->pc);
448 ptb = &tb_hash[h];
449 for(;;) {
450 tb1 = *ptb;
451         /* NOTE: the TB is not necessarily linked in the hash; if it is
452            not found there, it simply is not currently in use */
453 if (tb1 == NULL)
454 return;
455 if (tb1 == tb) {
456 *ptb = tb1->hash_next;
457 break;
459 ptb = &tb1->hash_next;
462 /* suppress this TB from the two jump lists */
463 tb_jmp_remove(tb, 0);
464 tb_jmp_remove(tb, 1);
466 /* suppress any remaining jumps to this TB */
467 tb1 = tb->jmp_first;
468 for(;;) {
469 n1 = (long)tb1 & 3;
470 if (n1 == 2)
471 break;
472 tb1 = (TranslationBlock *)((long)tb1 & ~3);
473 tb2 = tb1->jmp_next[n1];
474 tb_reset_jump(tb1, n1);
475 tb1->jmp_next[n1] = NULL;
476 tb1 = tb2;
478 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
481 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
483 PageDesc *p;
484 unsigned int h;
485 target_ulong phys_pc;
487 /* remove the TB from the hash list */
488 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
489 h = tb_phys_hash_func(phys_pc);
490 tb_remove(&tb_phys_hash[h], tb,
491 offsetof(TranslationBlock, phys_hash_next));
493 /* remove the TB from the page list */
494 if (tb->page_addr[0] != page_addr) {
495 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
496 tb_page_remove(&p->first_tb, tb);
497 invalidate_page_bitmap(p);
499 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
500 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
501 tb_page_remove(&p->first_tb, tb);
502 invalidate_page_bitmap(p);
505 tb_invalidate(tb);
508 static inline void set_bits(uint8_t *tab, int start, int len)
510 int end, mask, end1;
512 end = start + len;
513 tab += start >> 3;
514 mask = 0xff << (start & 7);
515 if ((start & ~7) == (end & ~7)) {
516 if (start < end) {
517 mask &= ~(0xff << (end & 7));
518 *tab |= mask;
520 } else {
521 *tab++ |= mask;
522 start = (start + 8) & ~7;
523 end1 = end & ~7;
524 while (start < end1) {
525 *tab++ = 0xff;
526 start += 8;
528 if (start < end) {
529 mask = ~(0xff << (end & 7));
530 *tab |= mask;
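/* build_page_bitmap() records, one bit per byte of the page, which bytes are
   covered by translated code. It is only built once a page has seen
   SMC_BITMAP_USE_THRESHOLD slow-path write accesses, and lets
   tb_invalidate_phys_page_fast() skip TB invalidation when the written bytes
   do not overlap any code. */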
535 static void build_page_bitmap(PageDesc *p)
537 int n, tb_start, tb_end;
538 TranslationBlock *tb;
540 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
541 if (!p->code_bitmap)
542 return;
543 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
545 tb = p->first_tb;
546 while (tb != NULL) {
547 n = (long)tb & 3;
548 tb = (TranslationBlock *)((long)tb & ~3);
549 /* NOTE: this is subtle as a TB may span two physical pages */
550 if (n == 0) {
551 /* NOTE: tb_end may be after the end of the page, but
552 it is not a problem */
553 tb_start = tb->pc & ~TARGET_PAGE_MASK;
554 tb_end = tb_start + tb->size;
555 if (tb_end > TARGET_PAGE_SIZE)
556 tb_end = TARGET_PAGE_SIZE;
557 } else {
558 tb_start = 0;
559 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
561 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
562 tb = tb->page_next[n];
566 #ifdef TARGET_HAS_PRECISE_SMC
568 static void tb_gen_code(CPUState *env,
569 target_ulong pc, target_ulong cs_base, int flags,
570 int cflags)
572 TranslationBlock *tb;
573 uint8_t *tc_ptr;
574 target_ulong phys_pc, phys_page2, virt_page2;
575 int code_gen_size;
577 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
578 tb = tb_alloc((unsigned long)pc);
579 if (!tb) {
580 /* flush must be done */
581 tb_flush(env);
582 /* cannot fail at this point */
583 tb = tb_alloc((unsigned long)pc);
585 tc_ptr = code_gen_ptr;
586 tb->tc_ptr = tc_ptr;
587 tb->cs_base = cs_base;
588 tb->flags = flags;
589 tb->cflags = cflags;
590 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
591 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
593 /* check next page if needed */
594 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
595 phys_page2 = -1;
596 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
597 phys_page2 = get_phys_addr_code(env, virt_page2);
599 tb_link_phys(tb, phys_pc, phys_page2);
601 #endif
603 /* invalidate all TBs which intersect with the target physical page
604 starting in range [start;end[. NOTE: start and end must refer to
605 the same physical page. 'is_cpu_write_access' should be true if called
606 from a real cpu write access: the virtual CPU will exit the current
607 TB if code is modified inside this TB. */
608 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
609 int is_cpu_write_access)
611 int n, current_tb_modified, current_tb_not_found, current_flags;
612 #if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
613 CPUState *env = cpu_single_env;
614 #endif
615 PageDesc *p;
616 TranslationBlock *tb, *tb_next, *current_tb;
617 target_ulong tb_start, tb_end;
618 target_ulong current_pc, current_cs_base;
620 p = page_find(start >> TARGET_PAGE_BITS);
621 if (!p)
622 return;
623 if (!p->code_bitmap &&
624 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
625 is_cpu_write_access) {
626 /* build code bitmap */
627 build_page_bitmap(p);
630 /* we remove all the TBs in the range [start, end[ */
631 /* XXX: see if in some cases it could be faster to invalidate all the code */
632 current_tb_not_found = is_cpu_write_access;
633 current_tb_modified = 0;
634 current_tb = NULL; /* avoid warning */
635 current_pc = 0; /* avoid warning */
636 current_cs_base = 0; /* avoid warning */
637 current_flags = 0; /* avoid warning */
638 tb = p->first_tb;
639 while (tb != NULL) {
640 n = (long)tb & 3;
641 tb = (TranslationBlock *)((long)tb & ~3);
642 tb_next = tb->page_next[n];
643 /* NOTE: this is subtle as a TB may span two physical pages */
644 if (n == 0) {
645 /* NOTE: tb_end may be after the end of the page, but
646 it is not a problem */
647 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
648 tb_end = tb_start + tb->size;
649 } else {
650 tb_start = tb->page_addr[1];
651 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
653 if (!(tb_end <= start || tb_start >= end)) {
654 #ifdef TARGET_HAS_PRECISE_SMC
655 if (current_tb_not_found) {
656 current_tb_not_found = 0;
657 current_tb = NULL;
658 if (env->mem_write_pc) {
659 /* now we have a real cpu fault */
660 current_tb = tb_find_pc(env->mem_write_pc);
663 if (current_tb == tb &&
664 !(current_tb->cflags & CF_SINGLE_INSN)) {
665 /* If we are modifying the current TB, we must stop
666 its execution. We could be more precise by checking
667 that the modification is after the current PC, but it
668 would require a specialized function to partially
669 restore the CPU state */
671 current_tb_modified = 1;
672 cpu_restore_state(current_tb, env,
673 env->mem_write_pc, NULL);
674 #if defined(TARGET_I386)
675 current_flags = env->hflags;
676 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
677 current_cs_base = (target_ulong)env->segs[R_CS].base;
678 current_pc = current_cs_base + env->eip;
679 #else
680 #error unsupported CPU
681 #endif
683 #endif /* TARGET_HAS_PRECISE_SMC */
684 tb_phys_invalidate(tb, -1);
686 tb = tb_next;
688 #if !defined(CONFIG_USER_ONLY)
689 /* if no code remaining, no need to continue to use slow writes */
690 if (!p->first_tb) {
691 invalidate_page_bitmap(p);
692 if (is_cpu_write_access) {
693 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
696 #endif
697 #ifdef TARGET_HAS_PRECISE_SMC
698 if (current_tb_modified) {
699 /* we generate a block containing just the instruction
700 modifying the memory. It will ensure that it cannot modify
701 itself */
702 tb_gen_code(env, current_pc, current_cs_base, current_flags,
703 CF_SINGLE_INSN);
704 cpu_resume_from_signal(env, NULL);
706 #endif
709 /* len must be <= 8 and start must be a multiple of len */
710 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
712 PageDesc *p;
713 int offset, b;
714 #if 0
715 if (1) {
716 if (loglevel) {
717 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
718 cpu_single_env->mem_write_vaddr, len,
719 cpu_single_env->eip,
720 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
723 #endif
724 p = page_find(start >> TARGET_PAGE_BITS);
725 if (!p)
726 return;
727 if (p->code_bitmap) {
728 offset = start & ~TARGET_PAGE_MASK;
729 b = p->code_bitmap[offset >> 3] >> (offset & 7);
730 if (b & ((1 << len) - 1))
731 goto do_invalidate;
732 } else {
733 do_invalidate:
734 tb_invalidate_phys_page_range(start, start + len, 1);
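/* Because len is at most 8 and start is len-aligned, the bits tested above
   always fall within a single byte of the code bitmap, so a write that does
   not touch translated code costs only a shift and a mask here. */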
738 #if !defined(CONFIG_SOFTMMU)
739 static void tb_invalidate_phys_page(target_ulong addr,
740 unsigned long pc, void *puc)
742 int n, current_flags, current_tb_modified;
743 target_ulong current_pc, current_cs_base;
744 PageDesc *p;
745 TranslationBlock *tb, *current_tb;
746 #ifdef TARGET_HAS_PRECISE_SMC
747 CPUState *env = cpu_single_env;
748 #endif
750 addr &= TARGET_PAGE_MASK;
751 p = page_find(addr >> TARGET_PAGE_BITS);
752 if (!p)
753 return;
754 tb = p->first_tb;
755 current_tb_modified = 0;
756 current_tb = NULL;
757 current_pc = 0; /* avoid warning */
758 current_cs_base = 0; /* avoid warning */
759 current_flags = 0; /* avoid warning */
760 #ifdef TARGET_HAS_PRECISE_SMC
761 if (tb && pc != 0) {
762 current_tb = tb_find_pc(pc);
764 #endif
765 while (tb != NULL) {
766 n = (long)tb & 3;
767 tb = (TranslationBlock *)((long)tb & ~3);
768 #ifdef TARGET_HAS_PRECISE_SMC
769 if (current_tb == tb &&
770 !(current_tb->cflags & CF_SINGLE_INSN)) {
771 /* If we are modifying the current TB, we must stop
772 its execution. We could be more precise by checking
773 that the modification is after the current PC, but it
774 would require a specialized function to partially
775 restore the CPU state */
777 current_tb_modified = 1;
778 cpu_restore_state(current_tb, env, pc, puc);
779 #if defined(TARGET_I386)
780 current_flags = env->hflags;
781 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
782 current_cs_base = (target_ulong)env->segs[R_CS].base;
783 current_pc = current_cs_base + env->eip;
784 #else
785 #error unsupported CPU
786 #endif
788 #endif /* TARGET_HAS_PRECISE_SMC */
789 tb_phys_invalidate(tb, addr);
790 tb = tb->page_next[n];
792 p->first_tb = NULL;
793 #ifdef TARGET_HAS_PRECISE_SMC
794 if (current_tb_modified) {
795 /* we generate a block containing just the instruction
796 modifying the memory. It will ensure that it cannot modify
797 itself */
798 tb_gen_code(env, current_pc, current_cs_base, current_flags,
799 CF_SINGLE_INSN);
800 cpu_resume_from_signal(env, puc);
802 #endif
804 #endif
806 /* add the tb in the target page and protect it if necessary */
807 static inline void tb_alloc_page(TranslationBlock *tb,
808 unsigned int n, unsigned int page_addr)
810 PageDesc *p;
811 TranslationBlock *last_first_tb;
813 tb->page_addr[n] = page_addr;
814 p = page_find(page_addr >> TARGET_PAGE_BITS);
815 tb->page_next[n] = p->first_tb;
816 last_first_tb = p->first_tb;
817 p->first_tb = (TranslationBlock *)((long)tb | n);
818 invalidate_page_bitmap(p);
820 #ifdef TARGET_HAS_SMC
822 #if defined(CONFIG_USER_ONLY)
823 if (p->flags & PAGE_WRITE) {
824 unsigned long host_start, host_end, addr;
825 int prot;
827 /* force the host page as non writable (writes will have a
828 page fault + mprotect overhead) */
829 host_start = page_addr & host_page_mask;
830 host_end = host_start + host_page_size;
831 prot = 0;
832 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
833 prot |= page_get_flags(addr);
834 mprotect((void *)host_start, host_page_size,
835 (prot & PAGE_BITS) & ~PAGE_WRITE);
836 #ifdef DEBUG_TB_INVALIDATE
837 printf("protecting code page: 0x%08lx\n",
838 host_start);
839 #endif
840 p->flags &= ~PAGE_WRITE;
842 #else
843 /* if some code is already present, then the pages are already
844 protected. So we handle the case where only the first TB is
845 allocated in a physical page */
846 if (!last_first_tb) {
847 target_ulong virt_addr;
849 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
850 tlb_protect_code(cpu_single_env, virt_addr);
852 #endif
854 #endif /* TARGET_HAS_SMC */
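/* Two write-protection strategies are used above: in user-only mode the host
   page containing the code is mprotect()ed read-only, so a guest write takes
   a fault that page_unprotect() turns back into an invalidation; in system
   emulation, tlb_protect_code() instead redirects the TLB write entries for
   the page to the IO_MEM_CODE slow-path handlers. */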
857 /* Allocate a new translation block. Flush the translation buffer if
858 too many translation blocks or too much generated code. */
859 TranslationBlock *tb_alloc(unsigned long pc)
861 TranslationBlock *tb;
863 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
864 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
865 return NULL;
866 tb = &tbs[nb_tbs++];
867 tb->pc = pc;
868 tb->cflags = 0;
869 return tb;
872 /* add a new TB and link it to the physical page tables. phys_page2 is
873 (-1) to indicate that only one page contains the TB. */
874 void tb_link_phys(TranslationBlock *tb,
875 target_ulong phys_pc, target_ulong phys_page2)
877 unsigned int h;
878 TranslationBlock **ptb;
880 /* add in the physical hash table */
881 h = tb_phys_hash_func(phys_pc);
882 ptb = &tb_phys_hash[h];
883 tb->phys_hash_next = *ptb;
884 *ptb = tb;
886 /* add in the page list */
887 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
888 if (phys_page2 != -1)
889 tb_alloc_page(tb, 1, phys_page2);
890 else
891 tb->page_addr[1] = -1;
892 #ifdef DEBUG_TB_CHECK
893 tb_page_check();
894 #endif
897 /* link the tb with the other TBs */
898 void tb_link(TranslationBlock *tb)
900 #if !defined(CONFIG_USER_ONLY)
902 VirtPageDesc *vp;
903 target_ulong addr;
905 /* save the code memory mappings (needed to invalidate the code) */
906 addr = tb->pc & TARGET_PAGE_MASK;
907 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
908 #ifdef DEBUG_TLB_CHECK
909 if (vp->valid_tag == virt_valid_tag &&
910 vp->phys_addr != tb->page_addr[0]) {
911 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
912 addr, tb->page_addr[0], vp->phys_addr);
914 #endif
915 vp->phys_addr = tb->page_addr[0];
916 if (vp->valid_tag != virt_valid_tag) {
917 vp->valid_tag = virt_valid_tag;
918 #if !defined(CONFIG_SOFTMMU)
919 vp->prot = 0;
920 #endif
923 if (tb->page_addr[1] != -1) {
924 addr += TARGET_PAGE_SIZE;
925 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
926 #ifdef DEBUG_TLB_CHECK
927 if (vp->valid_tag == virt_valid_tag &&
928 vp->phys_addr != tb->page_addr[1]) {
929 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
930 addr, tb->page_addr[1], vp->phys_addr);
932 #endif
933 vp->phys_addr = tb->page_addr[1];
934 if (vp->valid_tag != virt_valid_tag) {
935 vp->valid_tag = virt_valid_tag;
936 #if !defined(CONFIG_SOFTMMU)
937 vp->prot = 0;
938 #endif
942 #endif
944 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
945 tb->jmp_next[0] = NULL;
946 tb->jmp_next[1] = NULL;
947 #ifdef USE_CODE_COPY
948 tb->cflags &= ~CF_FP_USED;
949 if (tb->cflags & CF_TB_FP_USED)
950 tb->cflags |= CF_FP_USED;
951 #endif
953 /* init original jump addresses */
954 if (tb->tb_next_offset[0] != 0xffff)
955 tb_reset_jump(tb, 0);
956 if (tb->tb_next_offset[1] != 0xffff)
957 tb_reset_jump(tb, 1);
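/* A tb_next_offset[] value of 0xffff means no direct jump was generated for
   that slot; otherwise tb_reset_jump() points the direct jump back at the
   TB's own continuation code (offset tb_next_offset[n] in its generated
   code), so execution returns to the main loop instead of chaining straight
   into another TB until tb_add_jump() links it later. */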
960 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
961 tb[1].tc_ptr. Return NULL if not found */
962 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
964 int m_min, m_max, m;
965 unsigned long v;
966 TranslationBlock *tb;
968 if (nb_tbs <= 0)
969 return NULL;
970 if (tc_ptr < (unsigned long)code_gen_buffer ||
971 tc_ptr >= (unsigned long)code_gen_ptr)
972 return NULL;
973 /* binary search (cf Knuth) */
974 m_min = 0;
975 m_max = nb_tbs - 1;
976 while (m_min <= m_max) {
977 m = (m_min + m_max) >> 1;
978 tb = &tbs[m];
979 v = (unsigned long)tb->tc_ptr;
980 if (v == tc_ptr)
981 return tb;
982 else if (tc_ptr < v) {
983 m_max = m - 1;
984 } else {
985 m_min = m + 1;
988 return &tbs[m_max];
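/* The binary search above works because TBs are handed out sequentially from
   code_gen_buffer, so tbs[] is naturally sorted by tc_ptr: the entry whose
   generated code contains tc_ptr is the last one starting at or before it. */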
991 static void tb_reset_jump_recursive(TranslationBlock *tb);
993 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
995 TranslationBlock *tb1, *tb_next, **ptb;
996 unsigned int n1;
998 tb1 = tb->jmp_next[n];
999 if (tb1 != NULL) {
1000 /* find head of list */
1001 for(;;) {
1002 n1 = (long)tb1 & 3;
1003 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1004 if (n1 == 2)
1005 break;
1006 tb1 = tb1->jmp_next[n1];
1008         /* we are now sure that tb jumps to tb1 */
1009 tb_next = tb1;
1011 /* remove tb from the jmp_first list */
1012 ptb = &tb_next->jmp_first;
1013 for(;;) {
1014 tb1 = *ptb;
1015 n1 = (long)tb1 & 3;
1016 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1017 if (n1 == n && tb1 == tb)
1018 break;
1019 ptb = &tb1->jmp_next[n1];
1021 *ptb = tb->jmp_next[n];
1022 tb->jmp_next[n] = NULL;
1024 /* suppress the jump to next tb in generated code */
1025 tb_reset_jump(tb, n);
1027 /* suppress jumps in the tb on which we could have jumped */
1028 tb_reset_jump_recursive(tb_next);
1032 static void tb_reset_jump_recursive(TranslationBlock *tb)
1034 tb_reset_jump_recursive2(tb, 0);
1035 tb_reset_jump_recursive2(tb, 1);
1038 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1040 target_ulong phys_addr;
1042 phys_addr = cpu_get_phys_page_debug(env, pc);
1043 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1046 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1047 breakpoint is reached */
1048 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1050 #if defined(TARGET_I386) || defined(TARGET_PPC)
1051 int i;
1053 for(i = 0; i < env->nb_breakpoints; i++) {
1054 if (env->breakpoints[i] == pc)
1055 return 0;
1058 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1059 return -1;
1060 env->breakpoints[env->nb_breakpoints++] = pc;
1062 breakpoint_invalidate(env, pc);
1063 return 0;
1064 #else
1065 return -1;
1066 #endif
1069 /* remove a breakpoint */
1070 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1072 #if defined(TARGET_I386) || defined(TARGET_PPC)
1073 int i;
1074 for(i = 0; i < env->nb_breakpoints; i++) {
1075 if (env->breakpoints[i] == pc)
1076 goto found;
1078 return -1;
1079 found:
1080 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1081 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1082 env->nb_breakpoints--;
1084 breakpoint_invalidate(env, pc);
1085 return 0;
1086 #else
1087 return -1;
1088 #endif
1091 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1092 CPU loop after each instruction */
1093 void cpu_single_step(CPUState *env, int enabled)
1095 #if defined(TARGET_I386) || defined(TARGET_PPC)
1096 if (env->singlestep_enabled != enabled) {
1097 env->singlestep_enabled = enabled;
1098         /* must flush all the translated code to avoid inconsistencies */
1099 /* XXX: only flush what is necessary */
1100 tb_flush(env);
1102 #endif
1105 /* enable or disable low levels log */
1106 void cpu_set_log(int log_flags)
1108 loglevel = log_flags;
1109 if (loglevel && !logfile) {
1110 logfile = fopen(logfilename, "w");
1111 if (!logfile) {
1112 perror(logfilename);
1113 _exit(1);
1115 #if !defined(CONFIG_SOFTMMU)
1116         /* we must avoid glibc's use of mmap() by setting a buffer "by hand" */
1118 static uint8_t logfile_buf[4096];
1119 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1121 #else
1122 setvbuf(logfile, NULL, _IOLBF, 0);
1123 #endif
1127 void cpu_set_log_filename(const char *filename)
1129 logfilename = strdup(filename);
1132 /* mask must never be zero, except for A20 change call */
1133 void cpu_interrupt(CPUState *env, int mask)
1135 TranslationBlock *tb;
1136 static int interrupt_lock;
1138 env->interrupt_request |= mask;
1139 /* if the cpu is currently executing code, we must unlink it and
1140 all the potentially executing TB */
1141 tb = env->current_tb;
1142 if (tb && !testandset(&interrupt_lock)) {
1143 env->current_tb = NULL;
1144 tb_reset_jump_recursive(tb);
1145 interrupt_lock = 0;
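/* Resetting the jump chains of the TB being executed forces control back to
   the main execution loop at the next TB boundary, where interrupt_request is
   examined; the testandset() on interrupt_lock keeps concurrent callers from
   unlinking the same chain twice. */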
1149 void cpu_reset_interrupt(CPUState *env, int mask)
1151 env->interrupt_request &= ~mask;
1154 CPULogItem cpu_log_items[] = {
1155 { CPU_LOG_TB_OUT_ASM, "out_asm",
1156 "show generated host assembly code for each compiled TB" },
1157 { CPU_LOG_TB_IN_ASM, "in_asm",
1158 "show target assembly code for each compiled TB" },
1159 { CPU_LOG_TB_OP, "op",
1160 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1161 #ifdef TARGET_I386
1162 { CPU_LOG_TB_OP_OPT, "op_opt",
1163 "show micro ops after optimization for each compiled TB" },
1164 #endif
1165 { CPU_LOG_INT, "int",
1166 "show interrupts/exceptions in short format" },
1167 { CPU_LOG_EXEC, "exec",
1168 "show trace before each executed TB (lots of logs)" },
1169 { CPU_LOG_TB_CPU, "cpu",
1170 "show CPU state before bloc translation" },
1171 #ifdef TARGET_I386
1172 { CPU_LOG_PCALL, "pcall",
1173 "show protected mode far calls/returns/exceptions" },
1174 #endif
1175 { CPU_LOG_IOPORT, "ioport",
1176 "show all i/o ports accesses" },
1177 { 0, NULL, NULL },
1180 static int cmp1(const char *s1, int n, const char *s2)
1182 if (strlen(s2) != n)
1183 return 0;
1184 return memcmp(s1, s2, n) == 0;
1187 /* takes a comma separated list of log masks. Return 0 if error. */
1188 int cpu_str_to_log_mask(const char *str)
1190 CPULogItem *item;
1191 int mask;
1192 const char *p, *p1;
1194 p = str;
1195 mask = 0;
1196 for(;;) {
1197 p1 = strchr(p, ',');
1198 if (!p1)
1199 p1 = p + strlen(p);
1200 for(item = cpu_log_items; item->mask != 0; item++) {
1201 if (cmp1(p, p1 - p, item->name))
1202 goto found;
1204 return 0;
1205 found:
1206 mask |= item->mask;
1207 if (*p1 != ',')
1208 break;
1209 p = p1 + 1;
1211 return mask;
1214 void cpu_abort(CPUState *env, const char *fmt, ...)
1216 va_list ap;
1218 va_start(ap, fmt);
1219 fprintf(stderr, "qemu: fatal: ");
1220 vfprintf(stderr, fmt, ap);
1221 fprintf(stderr, "\n");
1222 #ifdef TARGET_I386
1223 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1224 #endif
1225 va_end(ap);
1226 abort();
1229 #if !defined(CONFIG_USER_ONLY)
1231 /* NOTE: if flush_global is true, also flush global entries (not
1232 implemented yet) */
1233 void tlb_flush(CPUState *env, int flush_global)
1235 int i;
1237 #if defined(DEBUG_TLB)
1238 printf("tlb_flush:\n");
1239 #endif
1240 /* must reset current TB so that interrupts cannot modify the
1241 links while we are modifying them */
1242 env->current_tb = NULL;
1244 for(i = 0; i < CPU_TLB_SIZE; i++) {
1245 env->tlb_read[0][i].address = -1;
1246 env->tlb_write[0][i].address = -1;
1247 env->tlb_read[1][i].address = -1;
1248 env->tlb_write[1][i].address = -1;
1251 virt_page_flush();
1252 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1253 tb_hash[i] = NULL;
1255 #if !defined(CONFIG_SOFTMMU)
1256 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1257 #endif
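/* Software TLB layout: tlb_read/tlb_write[is_user][] are direct-mapped arrays
   of CPU_TLB_SIZE entries indexed by bits of the virtual address. Each entry
   keeps the page-aligned virtual address (with an IO_MEM_* tag in the low
   bits for slow-path pages) and an 'addend' chosen so that host pointer =
   guest virtual address + addend for fast RAM accesses. */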
1260 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1262 if (addr == (tlb_entry->address &
1263 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1264 tlb_entry->address = -1;
1267 void tlb_flush_page(CPUState *env, target_ulong addr)
1269 int i, n;
1270 VirtPageDesc *vp;
1271 PageDesc *p;
1272 TranslationBlock *tb;
1274 #if defined(DEBUG_TLB)
1275 printf("tlb_flush_page: 0x%08x\n", addr);
1276 #endif
1277 /* must reset current TB so that interrupts cannot modify the
1278 links while we are modifying them */
1279 env->current_tb = NULL;
1281 addr &= TARGET_PAGE_MASK;
1282 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1283 tlb_flush_entry(&env->tlb_read[0][i], addr);
1284 tlb_flush_entry(&env->tlb_write[0][i], addr);
1285 tlb_flush_entry(&env->tlb_read[1][i], addr);
1286 tlb_flush_entry(&env->tlb_write[1][i], addr);
1288 /* remove from the virtual pc hash table all the TB at this
1289 virtual address */
1291 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1292 if (vp && vp->valid_tag == virt_valid_tag) {
1293 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1294 if (p) {
1295 /* we remove all the links to the TBs in this virtual page */
1296 tb = p->first_tb;
1297 while (tb != NULL) {
1298 n = (long)tb & 3;
1299 tb = (TranslationBlock *)((long)tb & ~3);
1300 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1301 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1302 tb_invalidate(tb);
1304 tb = tb->page_next[n];
1307 vp->valid_tag = 0;
1310 #if !defined(CONFIG_SOFTMMU)
1311 if (addr < MMAP_AREA_END)
1312 munmap((void *)addr, TARGET_PAGE_SIZE);
1313 #endif
1316 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1318 if (addr == (tlb_entry->address &
1319 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1320 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1321 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1322 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1326 /* update the TLBs so that writes to code in the virtual page 'addr'
1327 can be detected */
1328 static void tlb_protect_code(CPUState *env, target_ulong addr)
1330 int i;
1332 addr &= TARGET_PAGE_MASK;
1333 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1334 tlb_protect_code1(&env->tlb_write[0][i], addr);
1335 tlb_protect_code1(&env->tlb_write[1][i], addr);
1336 #if !defined(CONFIG_SOFTMMU)
1337 /* NOTE: as we generated the code for this page, it is already at
1338 least readable */
1339 if (addr < MMAP_AREA_END)
1340 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1341 #endif
1344 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1345 unsigned long phys_addr)
1347 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1348 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1349 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1353 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1354    tested for self modifying code */
1355 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1357 int i;
1359 phys_addr &= TARGET_PAGE_MASK;
1360 phys_addr += (long)phys_ram_base;
1361 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1362 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1363 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1366 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1367 unsigned long start, unsigned long length)
1369 unsigned long addr;
1370 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1371 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1372 if ((addr - start) < length) {
1373 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1378 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1380 CPUState *env;
1381 unsigned long length, start1;
1382 int i;
1384 start &= TARGET_PAGE_MASK;
1385 end = TARGET_PAGE_ALIGN(end);
1387 length = end - start;
1388 if (length == 0)
1389 return;
1390 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1392 env = cpu_single_env;
1393 /* we modify the TLB cache so that the dirty bit will be set again
1394 when accessing the range */
1395 start1 = start + (unsigned long)phys_ram_base;
1396 for(i = 0; i < CPU_TLB_SIZE; i++)
1397 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1398 for(i = 0; i < CPU_TLB_SIZE; i++)
1399 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1401 #if !defined(CONFIG_SOFTMMU)
1402 /* XXX: this is expensive */
1404 VirtPageDesc *p;
1405 int j;
1406 target_ulong addr;
1408 for(i = 0; i < L1_SIZE; i++) {
1409 p = l1_virt_map[i];
1410 if (p) {
1411 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1412 for(j = 0; j < L2_SIZE; j++) {
1413 if (p->valid_tag == virt_valid_tag &&
1414 p->phys_addr >= start && p->phys_addr < end &&
1415 (p->prot & PROT_WRITE)) {
1416 if (addr < MMAP_AREA_END) {
1417 mprotect((void *)addr, TARGET_PAGE_SIZE,
1418 p->prot & ~PROT_WRITE);
1421 addr += TARGET_PAGE_SIZE;
1422 p++;
1427 #endif
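/* Dirty tracking works by demoting write TLB entries for clean RAM pages to
   IO_MEM_NOTDIRTY: the first write then goes through notdirty_mem_write(),
   which performs the store, sets the page's phys_ram_dirty flag and, via
   tlb_set_dirty(), restores the plain IO_MEM_RAM mapping for later writes. */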
1430 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1431 unsigned long start)
1433 unsigned long addr;
1434 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1435 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1436 if (addr == start) {
1437 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1442 /* update the TLB corresponding to virtual page vaddr and phys addr
1443 addr so that it is no longer dirty */
1444 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1446 CPUState *env = cpu_single_env;
1447 int i;
1449 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1451 addr &= TARGET_PAGE_MASK;
1452 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1453 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1454 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1457 /* add a new TLB entry. At most one entry for a given virtual address
1458 is permitted. Return 0 if OK or 2 if the page could not be mapped
1459 (can only happen in non SOFTMMU mode for I/O pages or pages
1460 conflicting with the host address space). */
1461 int tlb_set_page(CPUState *env, target_ulong vaddr,
1462 target_phys_addr_t paddr, int prot,
1463 int is_user, int is_softmmu)
1465 PhysPageDesc *p;
1466 unsigned long pd;
1467 TranslationBlock *first_tb;
1468 unsigned int index;
1469 target_ulong address;
1470 unsigned long addend;
1471 int ret;
1473 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1474 first_tb = NULL;
1475 if (!p) {
1476 pd = IO_MEM_UNASSIGNED;
1477 } else {
1478 PageDesc *p1;
1479 pd = p->phys_offset;
1480 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1481 /* NOTE: we also allocate the page at this stage */
1482 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1483 first_tb = p1->first_tb;
1486 #if defined(DEBUG_TLB)
1487 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1488 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1489 #endif
1491 ret = 0;
1492 #if !defined(CONFIG_SOFTMMU)
1493 if (is_softmmu)
1494 #endif
1496 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1497 /* IO memory case */
1498 address = vaddr | pd;
1499 addend = paddr;
1500 } else {
1501 /* standard memory */
1502 address = vaddr;
1503 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1506 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1507 addend -= vaddr;
1508 if (prot & PAGE_READ) {
1509 env->tlb_read[is_user][index].address = address;
1510 env->tlb_read[is_user][index].addend = addend;
1511 } else {
1512 env->tlb_read[is_user][index].address = -1;
1513 env->tlb_read[is_user][index].addend = -1;
1515 if (prot & PAGE_WRITE) {
1516 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1517 /* ROM: access is ignored (same as unassigned) */
1518 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1519 env->tlb_write[is_user][index].addend = addend;
1520 } else
1521         /* XXX: the PowerPC code does not seem ready to handle
1522 self modifying code with DCBI */
1523 #if defined(TARGET_HAS_SMC) || 1
1524 if (first_tb) {
1525 /* if code is present, we use a specific memory
1526 handler. It works only for physical memory access */
1527 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1528 env->tlb_write[is_user][index].addend = addend;
1529 } else
1530 #endif
1531 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1532 !cpu_physical_memory_is_dirty(pd)) {
1533 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1534 env->tlb_write[is_user][index].addend = addend;
1535 } else {
1536 env->tlb_write[is_user][index].address = address;
1537 env->tlb_write[is_user][index].addend = addend;
1539 } else {
1540 env->tlb_write[is_user][index].address = -1;
1541 env->tlb_write[is_user][index].addend = -1;
1544 #if !defined(CONFIG_SOFTMMU)
1545 else {
1546 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1547 /* IO access: no mapping is done as it will be handled by the
1548 soft MMU */
1549 if (!(env->hflags & HF_SOFTMMU_MASK))
1550 ret = 2;
1551 } else {
1552 void *map_addr;
1554 if (vaddr >= MMAP_AREA_END) {
1555 ret = 2;
1556 } else {
1557 if (prot & PROT_WRITE) {
1558 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1559 #if defined(TARGET_HAS_SMC) || 1
1560 first_tb ||
1561 #endif
1562 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1563 !cpu_physical_memory_is_dirty(pd))) {
1564                     /* ROM: we behave as if code were inside */
1565 /* if code is present, we only map as read only and save the
1566 original mapping */
1567 VirtPageDesc *vp;
1569 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1570 vp->phys_addr = pd;
1571 vp->prot = prot;
1572 vp->valid_tag = virt_valid_tag;
1573 prot &= ~PAGE_WRITE;
1576 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1577 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1578 if (map_addr == MAP_FAILED) {
1579 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1580 paddr, vaddr);
1585 #endif
1586 return ret;
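/* In the non-SOFTMMU configuration above, guest pages are instead mapped
   straight into the host address space at the guest virtual address by
   mmap()ing phys_ram_fd; pages that hold translated code (or are not yet
   dirty) are mapped read-only, so the first write faults into
   page_unprotect() below. */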
1589 /* called from signal handler: invalidate the code and unprotect the
1590    page. Return TRUE if the fault was successfully handled. */
1591 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1593 #if !defined(CONFIG_SOFTMMU)
1594 VirtPageDesc *vp;
1596 #if defined(DEBUG_TLB)
1597 printf("page_unprotect: addr=0x%08x\n", addr);
1598 #endif
1599 addr &= TARGET_PAGE_MASK;
1601 /* if it is not mapped, no need to worry here */
1602 if (addr >= MMAP_AREA_END)
1603 return 0;
1604 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1605 if (!vp)
1606 return 0;
1607 /* NOTE: in this case, validate_tag is _not_ tested as it
1608 validates only the code TLB */
1609 if (vp->valid_tag != virt_valid_tag)
1610 return 0;
1611 if (!(vp->prot & PAGE_WRITE))
1612 return 0;
1613 #if defined(DEBUG_TLB)
1614 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1615 addr, vp->phys_addr, vp->prot);
1616 #endif
1617 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1618 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1619 (unsigned long)addr, vp->prot);
1620 /* set the dirty bit */
1621 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1622 /* flush the code inside */
1623 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1624 return 1;
1625 #else
1626 return 0;
1627 #endif
1630 #else
1632 void tlb_flush(CPUState *env, int flush_global)
1636 void tlb_flush_page(CPUState *env, target_ulong addr)
1640 int tlb_set_page(CPUState *env, target_ulong vaddr,
1641 target_phys_addr_t paddr, int prot,
1642 int is_user, int is_softmmu)
1644 return 0;
1647 /* dump memory mappings */
1648 void page_dump(FILE *f)
1650 unsigned long start, end;
1651 int i, j, prot, prot1;
1652 PageDesc *p;
1654 fprintf(f, "%-8s %-8s %-8s %s\n",
1655 "start", "end", "size", "prot");
1656 start = -1;
1657 end = -1;
1658 prot = 0;
1659 for(i = 0; i <= L1_SIZE; i++) {
1660 if (i < L1_SIZE)
1661 p = l1_map[i];
1662 else
1663 p = NULL;
1664 for(j = 0;j < L2_SIZE; j++) {
1665 if (!p)
1666 prot1 = 0;
1667 else
1668 prot1 = p[j].flags;
1669 if (prot1 != prot) {
1670 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1671 if (start != -1) {
1672 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1673 start, end, end - start,
1674 prot & PAGE_READ ? 'r' : '-',
1675 prot & PAGE_WRITE ? 'w' : '-',
1676 prot & PAGE_EXEC ? 'x' : '-');
1678 if (prot1 != 0)
1679 start = end;
1680 else
1681 start = -1;
1682 prot = prot1;
1684 if (!p)
1685 break;
1690 int page_get_flags(unsigned long address)
1692 PageDesc *p;
1694 p = page_find(address >> TARGET_PAGE_BITS);
1695 if (!p)
1696 return 0;
1697 return p->flags;
1700 /* modify the flags of a page and invalidate the code if
1701    necessary. The flag PAGE_WRITE_ORG is set automatically
1702 depending on PAGE_WRITE */
1703 void page_set_flags(unsigned long start, unsigned long end, int flags)
1705 PageDesc *p;
1706 unsigned long addr;
1708 start = start & TARGET_PAGE_MASK;
1709 end = TARGET_PAGE_ALIGN(end);
1710 if (flags & PAGE_WRITE)
1711 flags |= PAGE_WRITE_ORG;
1712 spin_lock(&tb_lock);
1713 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1714 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1715 /* if the write protection is set, then we invalidate the code
1716 inside */
1717 if (!(p->flags & PAGE_WRITE) &&
1718 (flags & PAGE_WRITE) &&
1719 p->first_tb) {
1720 tb_invalidate_phys_page(addr, 0, NULL);
1722 p->flags = flags;
1724 spin_unlock(&tb_lock);
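/* PAGE_WRITE_ORG remembers that a page was writable before write permission
   was removed to protect translated code; page_unprotect() below uses it to
   decide whether the faulting access may legitimately be given write access
   back once the TBs on the page have been invalidated. */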
1727 /* called from signal handler: invalidate the code and unprotect the
1728    page. Return TRUE if the fault was successfully handled. */
1729 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1731 unsigned int page_index, prot, pindex;
1732 PageDesc *p, *p1;
1733 unsigned long host_start, host_end, addr;
1735 host_start = address & host_page_mask;
1736 page_index = host_start >> TARGET_PAGE_BITS;
1737 p1 = page_find(page_index);
1738 if (!p1)
1739 return 0;
1740 host_end = host_start + host_page_size;
1741 p = p1;
1742 prot = 0;
1743 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1744 prot |= p->flags;
1745 p++;
1747 /* if the page was really writable, then we change its
1748 protection back to writable */
1749 if (prot & PAGE_WRITE_ORG) {
1750 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1751 if (!(p1[pindex].flags & PAGE_WRITE)) {
1752 mprotect((void *)host_start, host_page_size,
1753 (prot & PAGE_BITS) | PAGE_WRITE);
1754 p1[pindex].flags |= PAGE_WRITE;
1755 /* and since the content will be modified, we must invalidate
1756 the corresponding translated code. */
1757 tb_invalidate_phys_page(address, pc, puc);
1758 #ifdef DEBUG_TB_CHECK
1759 tb_invalidate_check(address);
1760 #endif
1761 return 1;
1764 return 0;
1767 /* call this function when system calls directly modify a memory area */
1768 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1770 unsigned long start, end, addr;
1772 start = (unsigned long)data;
1773 end = start + data_size;
1774 start &= TARGET_PAGE_MASK;
1775 end = TARGET_PAGE_ALIGN(end);
1776 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1777 page_unprotect(addr, 0, NULL);
1781 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1784 #endif /* defined(CONFIG_USER_ONLY) */
1786 /* register physical memory. 'size' must be a multiple of the target
1787 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1788 io memory page */
1789 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1790 unsigned long size,
1791 unsigned long phys_offset)
1793 unsigned long addr, end_addr;
1794 PhysPageDesc *p;
1796 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1797 end_addr = start_addr + size;
1798 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1799 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
1800 p->phys_offset = phys_offset;
1801 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1802 phys_offset += TARGET_PAGE_SIZE;
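/* The low bits of phys_offset double as an io index: a value whose low bits
   are at most IO_MEM_ROM means ordinary RAM/ROM backed by phys_ram_base at
   that offset, while anything larger selects one of the handler tables
   registered with cpu_register_io_memory(). */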
1806 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1808 return 0;
1811 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1815 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1816 unassigned_mem_readb,
1817 unassigned_mem_readb,
1818 unassigned_mem_readb,
1821 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1822 unassigned_mem_writeb,
1823 unassigned_mem_writeb,
1824 unassigned_mem_writeb,
1827 /* self modifying code support in soft mmu mode : writing to a page
1828 containing code comes to these functions */
1830 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1832 unsigned long phys_addr;
1834 phys_addr = addr - (unsigned long)phys_ram_base;
1835 #if !defined(CONFIG_USER_ONLY)
1836 tb_invalidate_phys_page_fast(phys_addr, 1);
1837 #endif
1838 stb_raw((uint8_t *)addr, val);
1839 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1842 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1844 unsigned long phys_addr;
1846 phys_addr = addr - (unsigned long)phys_ram_base;
1847 #if !defined(CONFIG_USER_ONLY)
1848 tb_invalidate_phys_page_fast(phys_addr, 2);
1849 #endif
1850 stw_raw((uint8_t *)addr, val);
1851 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1854 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1856 unsigned long phys_addr;
1858 phys_addr = addr - (unsigned long)phys_ram_base;
1859 #if !defined(CONFIG_USER_ONLY)
1860 tb_invalidate_phys_page_fast(phys_addr, 4);
1861 #endif
1862 stl_raw((uint8_t *)addr, val);
1863 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1866 static CPUReadMemoryFunc *code_mem_read[3] = {
1867 NULL, /* never used */
1868 NULL, /* never used */
1869 NULL, /* never used */
1872 static CPUWriteMemoryFunc *code_mem_write[3] = {
1873 code_mem_writeb,
1874 code_mem_writew,
1875 code_mem_writel,
1878 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1880 stb_raw((uint8_t *)addr, val);
1881 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1884 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1886 stw_raw((uint8_t *)addr, val);
1887 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1890 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1892 stl_raw((uint8_t *)addr, val);
1893 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1896 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1897 notdirty_mem_writeb,
1898 notdirty_mem_writew,
1899 notdirty_mem_writel,
1902 static void io_mem_init(void)
1904 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
1905 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1906 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
1907 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
1908 io_mem_nb = 5;
1910 /* alloc dirty bits array */
1911 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
1914 /* mem_read and mem_write are arrays of functions containing the
1915 function to access byte (index 0), word (index 1) and dword (index
1916 2). All functions must be supplied. If io_index is non zero, the
1917 corresponding io zone is modified. If it is zero, a new io zone is
1918 allocated. The return value can be used with
1919 cpu_register_physical_memory(). (-1) is returned if error. */
1920 int cpu_register_io_memory(int io_index,
1921 CPUReadMemoryFunc **mem_read,
1922 CPUWriteMemoryFunc **mem_write,
1923 void *opaque)
1925 int i;
1927 if (io_index <= 0) {
1928 if (io_index >= IO_MEM_NB_ENTRIES)
1929 return -1;
1930 io_index = io_mem_nb++;
1931 } else {
1932 if (io_index >= IO_MEM_NB_ENTRIES)
1933 return -1;
1936 for(i = 0;i < 3; i++) {
1937 io_mem_read[io_index][i] = mem_read[i];
1938 io_mem_write[io_index][i] = mem_write[i];
1940 io_mem_opaque[io_index] = opaque;
1941 return io_index << IO_MEM_SHIFT;
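/* Typical use (illustrative, names not from this file): a device supplies
   three read and three write handlers (byte/word/long), registers them, and
   maps the returned value at a guest physical address:

       static CPUReadMemoryFunc *my_mmio_read[3] = { ... };
       static CPUWriteMemoryFunc *my_mmio_write[3] = { ... };
       int io = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, opaque);
       cpu_register_physical_memory(0xe0000000, 0x1000, io);

   Accesses to that page are then dispatched through io_mem_read[] and
   io_mem_write[] above. */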
1944 /* physical memory access (slow version, mainly for debug) */
1945 #if defined(CONFIG_USER_ONLY)
1946 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1947 int len, int is_write)
1949 int l, flags;
1950 target_ulong page;
1952 while (len > 0) {
1953 page = addr & TARGET_PAGE_MASK;
1954 l = (page + TARGET_PAGE_SIZE) - addr;
1955 if (l > len)
1956 l = len;
1957 flags = page_get_flags(page);
1958 if (!(flags & PAGE_VALID))
1959 return;
1960 if (is_write) {
1961 if (!(flags & PAGE_WRITE))
1962 return;
1963 memcpy((uint8_t *)addr, buf, len);
1964 } else {
1965 if (!(flags & PAGE_READ))
1966 return;
1967 memcpy(buf, (uint8_t *)addr, len);
1969 len -= l;
1970 buf += l;
1971 addr += l;
1974 #else
1975 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1976 int len, int is_write)
1978 int l, io_index;
1979 uint8_t *ptr;
1980 uint32_t val;
1981 target_phys_addr_t page;
1982 unsigned long pd;
1983 PhysPageDesc *p;
1985 while (len > 0) {
1986 page = addr & TARGET_PAGE_MASK;
1987 l = (page + TARGET_PAGE_SIZE) - addr;
1988 if (l > len)
1989 l = len;
1990 p = phys_page_find(page >> TARGET_PAGE_BITS);
1991 if (!p) {
1992 pd = IO_MEM_UNASSIGNED;
1993 } else {
1994 pd = p->phys_offset;
1997 if (is_write) {
1998 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1999 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2000 if (l >= 4 && ((addr & 3) == 0)) {
2001                 /* 32 bit write access */
2002 val = ldl_raw(buf);
2003 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2004 l = 4;
2005 } else if (l >= 2 && ((addr & 1) == 0)) {
2006                 /* 16 bit write access */
2007 val = lduw_raw(buf);
2008 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2009 l = 2;
2010 } else {
2011 /* 8 bit access */
2012 val = ldub_raw(buf);
2013 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2014 l = 1;
2016 } else {
2017 unsigned long addr1;
2018 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2019 /* RAM case */
2020 ptr = phys_ram_base + addr1;
2021 memcpy(ptr, buf, l);
2022 /* invalidate code */
2023 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2024 /* set dirty bit */
2025 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
2027 } else {
2028 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2029 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2030 /* I/O case */
2031 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2032 if (l >= 4 && ((addr & 3) == 0)) {
2033 /* 32 bit read access */
2034 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2035 stl_raw(buf, val);
2036 l = 4;
2037 } else if (l >= 2 && ((addr & 1) == 0)) {
2038 /* 16 bit read access */
2039 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2040 stw_raw(buf, val);
2041 l = 2;
2042 } else {
2043 /* 8 bit access */
2044 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2045 stb_raw(buf, val);
2046 l = 1;
2048 } else {
2049 /* RAM case */
2050 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2051 (addr & ~TARGET_PAGE_MASK);
2052 memcpy(buf, ptr, l);
2055 len -= l;
2056 buf += l;
2057 addr += l;
2060 #endif
2062 /* virtual memory access for debug */
2063 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2064 uint8_t *buf, int len, int is_write)
2066 int l;
2067 target_ulong page, phys_addr;
2069 while (len > 0) {
2070 page = addr & TARGET_PAGE_MASK;
2071 phys_addr = cpu_get_phys_page_debug(env, page);
2072 /* if no physical page mapped, return an error */
2073 if (phys_addr == -1)
2074 return -1;
2075 l = (page + TARGET_PAGE_SIZE) - addr;
2076 if (l > len)
2077 l = len;
2078 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2079 buf, l, is_write);
2080 len -= l;
2081 buf += l;
2082 addr += l;
2084 return 0;
2087 #if !defined(CONFIG_USER_ONLY)
2089 #define MMUSUFFIX _cmmu
2090 #define GETPC() NULL
2091 #define env cpu_single_env
2093 #define SHIFT 0
2094 #include "softmmu_template.h"
2096 #define SHIFT 1
2097 #include "softmmu_template.h"
2099 #define SHIFT 2
2100 #include "softmmu_template.h"
2102 #define SHIFT 3
2103 #include "softmmu_template.h"
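/* Each inclusion of softmmu_template.h above instantiates the slow-path
   load/store helpers for one access size (SHIFT 0..3 = 1, 2, 4, 8 bytes);
   the _cmmu suffix and the NULL GETPC() mark these as the code-fetch
   variants used when translating, rather than the data-access helpers
   called from generated code. */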
2105 #undef env
2107 #endif