Fix some signedness issues caught by gcc 4.3
[qemu-kvm/fedora.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
98 #if !defined(CONFIG_USER_ONLY)
99 ram_addr_t phys_ram_size;
100 int phys_ram_fd;
101 uint8_t *phys_ram_base;
102 uint8_t *phys_ram_dirty;
103 static ram_addr_t phys_ram_alloc_offset = 0;
104 #endif
106 CPUState *first_cpu;
107 /* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
109 CPUState *cpu_single_env;
111 typedef struct PageDesc {
112 /* list of TBs intersecting this ram page */
113 TranslationBlock *first_tb;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118 #if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120 #endif
121 } PageDesc;
123 typedef struct PhysPageDesc {
124 /* offset in host memory of the page + io_index in the low 12 bits */
125 ram_addr_t phys_offset;
126 } PhysPageDesc;
128 #define L2_BITS 10
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64-bit address space.
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135 #else
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
137 #endif
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
151 #if !defined(CONFIG_USER_ONLY)
152 static void io_mem_init(void);
154 /* io memory support */
155 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
156 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
157 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
158 static int io_mem_nb;
159 static int io_mem_watch;
160 #endif
162 /* log support */
163 char *logfilename = "/tmp/qemu.log";
164 FILE *logfile;
165 int loglevel;
166 static int log_append = 0;
168 /* statistics */
169 static int tlb_flush_count;
170 static int tb_flush_count;
171 static int tb_phys_invalidate_count;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179 } subpage_t;
181 #ifdef _WIN32
182 static void map_exec(void *addr, long size)
184 DWORD old_protect;
185 VirtualProtect(addr, size,
186 PAGE_EXECUTE_READWRITE, &old_protect);
189 #else
190 static void map_exec(void *addr, long size)
192 unsigned long start, end, page_size;
194 page_size = getpagesize();
195 start = (unsigned long)addr;
196 start &= ~(page_size - 1);
198 end = (unsigned long)addr + size;
199 end += page_size - 1;
200 end &= ~(page_size - 1);
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
205 #endif
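/* Editor's note: illustrative sketch only, not part of the original file.
   The Unix map_exec() above rounds the requested range outward to host page
   boundaries before calling mprotect(), which only operates on whole pages.
   With an assumed 4096-byte page, an 8-byte region at 0x1234 widens to
   [0x1000, 0x2000). */
#if 0 /* example only */
static void map_exec_rounding_example(void)
{
    unsigned long page_size = 4096;
    unsigned long addr = 0x1234, size = 8;
    unsigned long start = addr & ~(page_size - 1);                      /* 0x1000 */
    unsigned long end = (addr + size + page_size - 1) & ~(page_size - 1); /* 0x2000 */
    /* mprotect((void *)start, end - start, PROT_READ | PROT_WRITE | PROT_EXEC); */
}
#endif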
207 static void page_init(void)
209 /* NOTE: we can always suppose that qemu_host_page_size >=
210 TARGET_PAGE_SIZE */
211 #ifdef _WIN32
213 SYSTEM_INFO system_info;
214 DWORD old_protect;
216 GetSystemInfo(&system_info);
217 qemu_real_host_page_size = system_info.dwPageSize;
219 #else
220 qemu_real_host_page_size = getpagesize();
221 #endif
222 if (qemu_host_page_size == 0)
223 qemu_host_page_size = qemu_real_host_page_size;
224 if (qemu_host_page_size < TARGET_PAGE_SIZE)
225 qemu_host_page_size = TARGET_PAGE_SIZE;
226 qemu_host_page_bits = 0;
227 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
228 qemu_host_page_bits++;
229 qemu_host_page_mask = ~(qemu_host_page_size - 1);
230 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
231 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
233 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
235 long long startaddr, endaddr;
236 FILE *f;
237 int n;
239 mmap_lock();
240 last_brk = (unsigned long)sbrk(0);
241 f = fopen("/proc/self/maps", "r");
242 if (f) {
243 do {
244 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
245 if (n == 2) {
246 startaddr = MIN(startaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 endaddr = MIN(endaddr,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
250 page_set_flags(startaddr & TARGET_PAGE_MASK,
251 TARGET_PAGE_ALIGN(endaddr),
252 PAGE_RESERVED);
254 } while (!feof(f));
255 fclose(f);
257 mmap_unlock();
259 #endif
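/* Editor's note: illustrative sketch only, not part of the original file.
   page_init() derives qemu_host_page_bits and qemu_host_page_mask from the
   host page size with the same loop as below; for a 4096-byte page this
   yields bits = 12 and mask = ~0xfff. */
#if 0 /* example only */
static void host_page_mask_example(void)
{
    unsigned long size = 4096, bits = 0, mask;
    while ((1ul << bits) < size)
        bits++;              /* bits == 12 */
    mask = ~(size - 1);      /* 0xfffffffffffff000 on a 64-bit host */
}
#endif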
262 static inline PageDesc *page_find_alloc(target_ulong index)
264 PageDesc **lp, *p;
266 lp = &l1_map[index >> L2_BITS];
267 p = *lp;
268 if (!p) {
269 /* allocate if not found */
270 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
271 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
272 *lp = p;
274 return p + (index & (L2_SIZE - 1));
277 static inline PageDesc *page_find(target_ulong index)
279 PageDesc *p;
281 p = l1_map[index >> L2_BITS];
282 if (!p)
283 return 0;
284 return p + (index & (L2_SIZE - 1));
287 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
289 void **lp, **p;
290 PhysPageDesc *pd;
292 p = (void **)l1_phys_map;
293 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
295 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
296 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
297 #endif
298 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
299 p = *lp;
300 if (!p) {
301 /* allocate if not found */
302 if (!alloc)
303 return NULL;
304 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
305 memset(p, 0, sizeof(void *) * L1_SIZE);
306 *lp = p;
308 #endif
309 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
310 pd = *lp;
311 if (!pd) {
312 int i;
313 /* allocate if not found */
314 if (!alloc)
315 return NULL;
316 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
317 *lp = pd;
318 for (i = 0; i < L2_SIZE; i++)
319 pd[i].phys_offset = IO_MEM_UNASSIGNED;
321 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
324 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
326 return phys_page_find_alloc(index, 0);
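/* Editor's note: illustrative sketch only, not part of the original file.
   The lookups above use a two-level table: the upper L1_BITS of a page
   index select a slot in the first-level array, the lower L2_BITS select
   the entry inside the second-level array that is allocated on demand.
   With L1_BITS = 10 and L2_BITS = 10, page index 0x12345 splits as below. */
#if 0 /* example only */
static void page_index_split_example(void)
{
    unsigned long index = 0x12345;
    unsigned long l1 = index >> 10;              /* 0x48  -> first-level slot */
    unsigned long l2 = index & ((1 << 10) - 1);  /* 0x345 -> second-level slot */
}
#endif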
329 #if !defined(CONFIG_USER_ONLY)
330 static void tlb_protect_code(ram_addr_t ram_addr);
331 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
332 target_ulong vaddr);
333 #define mmap_lock() do { } while(0)
334 #define mmap_unlock() do { } while(0)
335 #endif
337 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
339 #if defined(CONFIG_USER_ONLY)
340 /* Currently it is not recommended to allocate big chunks of data in
341 user mode. It will change when a dedicated libc is used */
342 #define USE_STATIC_CODE_GEN_BUFFER
343 #endif
345 #ifdef USE_STATIC_CODE_GEN_BUFFER
346 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
347 #endif
349 void code_gen_alloc(unsigned long tb_size)
351 #ifdef USE_STATIC_CODE_GEN_BUFFER
352 code_gen_buffer = static_code_gen_buffer;
353 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
354 map_exec(code_gen_buffer, code_gen_buffer_size);
355 #else
356 code_gen_buffer_size = tb_size;
357 if (code_gen_buffer_size == 0) {
358 #if defined(CONFIG_USER_ONLY)
359 /* in user mode, phys_ram_size is not meaningful */
360 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
361 #else
362 /* XXX: needs adjustments */
363 code_gen_buffer_size = (int)(phys_ram_size / 4);
364 #endif
366 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
367 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
368 /* The code gen buffer location may have constraints depending on
369 the host cpu and OS */
370 #if defined(__linux__)
372 int flags;
373 flags = MAP_PRIVATE | MAP_ANONYMOUS;
374 #if defined(__x86_64__)
375 flags |= MAP_32BIT;
376 /* Cannot map more than that */
377 if (code_gen_buffer_size > (800 * 1024 * 1024))
378 code_gen_buffer_size = (800 * 1024 * 1024);
379 #endif
380 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
381 PROT_WRITE | PROT_READ | PROT_EXEC,
382 flags, -1, 0);
383 if (code_gen_buffer == MAP_FAILED) {
384 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
385 exit(1);
388 #else
389 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
390 if (!code_gen_buffer) {
391 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
392 exit(1);
394 map_exec(code_gen_buffer, code_gen_buffer_size);
395 #endif
396 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
397 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
398 code_gen_buffer_max_size = code_gen_buffer_size -
399 code_gen_max_block_size();
400 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
401 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
404 /* Must be called before using the QEMU cpus. 'tb_size' is the size
405 (in bytes) allocated to the translation buffer. Zero means default
406 size. */
407 void cpu_exec_init_all(unsigned long tb_size)
409 cpu_gen_init();
410 code_gen_alloc(tb_size);
411 code_gen_ptr = code_gen_buffer;
412 page_init();
413 #if !defined(CONFIG_USER_ONLY)
414 io_mem_init();
415 #endif
418 void cpu_exec_init(CPUState *env)
420 CPUState **penv;
421 int cpu_index;
423 env->next_cpu = NULL;
424 penv = &first_cpu;
425 cpu_index = 0;
426 while (*penv != NULL) {
427 penv = (CPUState **)&(*penv)->next_cpu;
428 cpu_index++;
430 env->cpu_index = cpu_index;
431 env->nb_watchpoints = 0;
432 *penv = env;
435 static inline void invalidate_page_bitmap(PageDesc *p)
437 if (p->code_bitmap) {
438 qemu_free(p->code_bitmap);
439 p->code_bitmap = NULL;
441 p->code_write_count = 0;
444 /* set to NULL all the 'first_tb' fields in all PageDescs */
445 static void page_flush_tb(void)
447 int i, j;
448 PageDesc *p;
450 for(i = 0; i < L1_SIZE; i++) {
451 p = l1_map[i];
452 if (p) {
453 for(j = 0; j < L2_SIZE; j++) {
454 p->first_tb = NULL;
455 invalidate_page_bitmap(p);
456 p++;
462 /* flush all the translation blocks */
463 /* XXX: tb_flush is currently not thread safe */
464 void tb_flush(CPUState *env1)
466 CPUState *env;
467 #if defined(DEBUG_FLUSH)
468 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
469 (unsigned long)(code_gen_ptr - code_gen_buffer),
470 nb_tbs, nb_tbs > 0 ?
471 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
472 #endif
473 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
474 cpu_abort(env1, "Internal error: code buffer overflow\n");
476 nb_tbs = 0;
478 for(env = first_cpu; env != NULL; env = env->next_cpu) {
479 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
482 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
483 page_flush_tb();
485 code_gen_ptr = code_gen_buffer;
486 /* XXX: flush processor icache at this point if cache flush is
487 expensive */
488 tb_flush_count++;
491 #ifdef DEBUG_TB_CHECK
493 static void tb_invalidate_check(target_ulong address)
495 TranslationBlock *tb;
496 int i;
497 address &= TARGET_PAGE_MASK;
498 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
499 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
500 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
501 address >= tb->pc + tb->size)) {
502 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
503 address, (long)tb->pc, tb->size);
509 /* verify that all the pages have correct rights for code */
510 static void tb_page_check(void)
512 TranslationBlock *tb;
513 int i, flags1, flags2;
515 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
516 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
517 flags1 = page_get_flags(tb->pc);
518 flags2 = page_get_flags(tb->pc + tb->size - 1);
519 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
520 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
521 (long)tb->pc, tb->size, flags1, flags2);
527 void tb_jmp_check(TranslationBlock *tb)
529 TranslationBlock *tb1;
530 unsigned int n1;
532 /* suppress any remaining jumps to this TB */
533 tb1 = tb->jmp_first;
534 for(;;) {
535 n1 = (long)tb1 & 3;
536 tb1 = (TranslationBlock *)((long)tb1 & ~3);
537 if (n1 == 2)
538 break;
539 tb1 = tb1->jmp_next[n1];
541 /* check end of list */
542 if (tb1 != tb) {
543 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
547 #endif
549 /* invalidate one TB */
550 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
551 int next_offset)
553 TranslationBlock *tb1;
554 for(;;) {
555 tb1 = *ptb;
556 if (tb1 == tb) {
557 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
558 break;
560 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
564 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
566 TranslationBlock *tb1;
567 unsigned int n1;
569 for(;;) {
570 tb1 = *ptb;
571 n1 = (long)tb1 & 3;
572 tb1 = (TranslationBlock *)((long)tb1 & ~3);
573 if (tb1 == tb) {
574 *ptb = tb1->page_next[n1];
575 break;
577 ptb = &tb1->page_next[n1];
581 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
583 TranslationBlock *tb1, **ptb;
584 unsigned int n1;
586 ptb = &tb->jmp_next[n];
587 tb1 = *ptb;
588 if (tb1) {
589 /* find tb(n) in circular list */
590 for(;;) {
591 tb1 = *ptb;
592 n1 = (long)tb1 & 3;
593 tb1 = (TranslationBlock *)((long)tb1 & ~3);
594 if (n1 == n && tb1 == tb)
595 break;
596 if (n1 == 2) {
597 ptb = &tb1->jmp_first;
598 } else {
599 ptb = &tb1->jmp_next[n1];
602 /* now we can suppress tb(n) from the list */
603 *ptb = tb->jmp_next[n];
605 tb->jmp_next[n] = NULL;
609 /* reset the jump entry 'n' of a TB so that it is not chained to
610 another TB */
611 static inline void tb_reset_jump(TranslationBlock *tb, int n)
613 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
616 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
618 CPUState *env;
619 PageDesc *p;
620 unsigned int h, n1;
621 target_phys_addr_t phys_pc;
622 TranslationBlock *tb1, *tb2;
624 /* remove the TB from the hash list */
625 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
626 h = tb_phys_hash_func(phys_pc);
627 tb_remove(&tb_phys_hash[h], tb,
628 offsetof(TranslationBlock, phys_hash_next));
630 /* remove the TB from the page list */
631 if (tb->page_addr[0] != page_addr) {
632 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
633 tb_page_remove(&p->first_tb, tb);
634 invalidate_page_bitmap(p);
636 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
637 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
638 tb_page_remove(&p->first_tb, tb);
639 invalidate_page_bitmap(p);
642 tb_invalidated_flag = 1;
644 /* remove the TB from the hash list */
645 h = tb_jmp_cache_hash_func(tb->pc);
646 for(env = first_cpu; env != NULL; env = env->next_cpu) {
647 if (env->tb_jmp_cache[h] == tb)
648 env->tb_jmp_cache[h] = NULL;
651 /* suppress this TB from the two jump lists */
652 tb_jmp_remove(tb, 0);
653 tb_jmp_remove(tb, 1);
655 /* suppress any remaining jumps to this TB */
656 tb1 = tb->jmp_first;
657 for(;;) {
658 n1 = (long)tb1 & 3;
659 if (n1 == 2)
660 break;
661 tb1 = (TranslationBlock *)((long)tb1 & ~3);
662 tb2 = tb1->jmp_next[n1];
663 tb_reset_jump(tb1, n1);
664 tb1->jmp_next[n1] = NULL;
665 tb1 = tb2;
667 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
669 tb_phys_invalidate_count++;
672 static inline void set_bits(uint8_t *tab, int start, int len)
674 int end, mask, end1;
676 end = start + len;
677 tab += start >> 3;
678 mask = 0xff << (start & 7);
679 if ((start & ~7) == (end & ~7)) {
680 if (start < end) {
681 mask &= ~(0xff << (end & 7));
682 *tab |= mask;
684 } else {
685 *tab++ |= mask;
686 start = (start + 8) & ~7;
687 end1 = end & ~7;
688 while (start < end1) {
689 *tab++ = 0xff;
690 start += 8;
692 if (start < end) {
693 mask = ~(0xff << (end & 7));
694 *tab |= mask;
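/* Editor's note: illustrative sketch only, not part of the original file.
   set_bits() marks 'len' consecutive bits starting at bit 'start' in a
   byte-oriented bitmap; build_page_bitmap() below uses one bit per byte of
   guest code in a page.  Marking 16 bits starting at bit 10 sets bits
   10..25, i.e. parts of bytes 1..3 of the table. */
#if 0 /* example only */
static void set_bits_example(void)
{
    uint8_t bitmap[512 / 8];
    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 10, 16);   /* sets bits 10..25 inclusive */
}
#endif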
699 static void build_page_bitmap(PageDesc *p)
701 int n, tb_start, tb_end;
702 TranslationBlock *tb;
704 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
705 if (!p->code_bitmap)
706 return;
707 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
709 tb = p->first_tb;
710 while (tb != NULL) {
711 n = (long)tb & 3;
712 tb = (TranslationBlock *)((long)tb & ~3);
713 /* NOTE: this is subtle as a TB may span two physical pages */
714 if (n == 0) {
715 /* NOTE: tb_end may be after the end of the page, but
716 it is not a problem */
717 tb_start = tb->pc & ~TARGET_PAGE_MASK;
718 tb_end = tb_start + tb->size;
719 if (tb_end > TARGET_PAGE_SIZE)
720 tb_end = TARGET_PAGE_SIZE;
721 } else {
722 tb_start = 0;
723 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
725 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
726 tb = tb->page_next[n];
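/* Editor's note: illustrative sketch only, not part of the original file.
   The page and jump lists in this file store a small tag in the two low
   bits of each TranslationBlock pointer (TBs are sufficiently aligned):
   0 or 1 is the page slot the link belongs to, and 2 marks the head/end of
   a jump list.  Decoding is a simple mask, as used throughout this file. */
#if 0 /* example only */
static void tagged_tb_pointer_example(TranslationBlock *tb)
{
    long tagged = (long)tb | 1;                               /* encode slot 1 */
    int n = tagged & 3;                                       /* n == 1 */
    TranslationBlock *p = (TranslationBlock *)(tagged & ~3);  /* p == tb */
}
#endif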
730 #ifdef TARGET_HAS_PRECISE_SMC
732 static void tb_gen_code(CPUState *env,
733 target_ulong pc, target_ulong cs_base, int flags,
734 int cflags)
736 TranslationBlock *tb;
737 uint8_t *tc_ptr;
738 target_ulong phys_pc, phys_page2, virt_page2;
739 int code_gen_size;
741 phys_pc = get_phys_addr_code(env, pc);
742 tb = tb_alloc(pc);
743 if (!tb) {
744 /* flush must be done */
745 tb_flush(env);
746 /* cannot fail at this point */
747 tb = tb_alloc(pc);
749 tc_ptr = code_gen_ptr;
750 tb->tc_ptr = tc_ptr;
751 tb->cs_base = cs_base;
752 tb->flags = flags;
753 tb->cflags = cflags;
754 cpu_gen_code(env, tb, &code_gen_size);
755 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
757 /* check next page if needed */
758 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
759 phys_page2 = -1;
760 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
761 phys_page2 = get_phys_addr_code(env, virt_page2);
763 tb_link_phys(tb, phys_pc, phys_page2);
765 #endif
767 /* invalidate all TBs which intersect with the target physical page
768 starting in range [start;end[. NOTE: start and end must refer to
769 the same physical page. 'is_cpu_write_access' should be true if called
770 from a real cpu write access: the virtual CPU will exit the current
771 TB if code is modified inside this TB. */
772 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
773 int is_cpu_write_access)
775 int n, current_tb_modified, current_tb_not_found, current_flags;
776 CPUState *env = cpu_single_env;
777 PageDesc *p;
778 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
779 target_ulong tb_start, tb_end;
780 target_ulong current_pc, current_cs_base;
782 p = page_find(start >> TARGET_PAGE_BITS);
783 if (!p)
784 return;
785 if (!p->code_bitmap &&
786 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
787 is_cpu_write_access) {
788 /* build code bitmap */
789 build_page_bitmap(p);
792 /* we remove all the TBs in the range [start, end[ */
793 /* XXX: see if in some cases it could be faster to invalidate all the code */
794 current_tb_not_found = is_cpu_write_access;
795 current_tb_modified = 0;
796 current_tb = NULL; /* avoid warning */
797 current_pc = 0; /* avoid warning */
798 current_cs_base = 0; /* avoid warning */
799 current_flags = 0; /* avoid warning */
800 tb = p->first_tb;
801 while (tb != NULL) {
802 n = (long)tb & 3;
803 tb = (TranslationBlock *)((long)tb & ~3);
804 tb_next = tb->page_next[n];
805 /* NOTE: this is subtle as a TB may span two physical pages */
806 if (n == 0) {
807 /* NOTE: tb_end may be after the end of the page, but
808 it is not a problem */
809 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
810 tb_end = tb_start + tb->size;
811 } else {
812 tb_start = tb->page_addr[1];
813 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
815 if (!(tb_end <= start || tb_start >= end)) {
816 #ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_not_found) {
818 current_tb_not_found = 0;
819 current_tb = NULL;
820 if (env->mem_write_pc) {
821 /* now we have a real cpu fault */
822 current_tb = tb_find_pc(env->mem_write_pc);
825 if (current_tb == tb &&
826 !(current_tb->cflags & CF_SINGLE_INSN)) {
827 /* If we are modifying the current TB, we must stop
828 its execution. We could be more precise by checking
829 that the modification is after the current PC, but it
830 would require a specialized function to partially
831 restore the CPU state */
833 current_tb_modified = 1;
834 cpu_restore_state(current_tb, env,
835 env->mem_write_pc, NULL);
836 #if defined(TARGET_I386)
837 current_flags = env->hflags;
838 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
839 current_cs_base = (target_ulong)env->segs[R_CS].base;
840 current_pc = current_cs_base + env->eip;
841 #else
842 #error unsupported CPU
843 #endif
845 #endif /* TARGET_HAS_PRECISE_SMC */
846 /* we need to do that to handle the case where a signal
847 occurs while doing tb_phys_invalidate() */
848 saved_tb = NULL;
849 if (env) {
850 saved_tb = env->current_tb;
851 env->current_tb = NULL;
853 tb_phys_invalidate(tb, -1);
854 if (env) {
855 env->current_tb = saved_tb;
856 if (env->interrupt_request && env->current_tb)
857 cpu_interrupt(env, env->interrupt_request);
860 tb = tb_next;
862 #if !defined(CONFIG_USER_ONLY)
863 /* if no code remaining, no need to continue to use slow writes */
864 if (!p->first_tb) {
865 invalidate_page_bitmap(p);
866 if (is_cpu_write_access) {
867 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
870 #endif
871 #ifdef TARGET_HAS_PRECISE_SMC
872 if (current_tb_modified) {
873 /* we generate a block containing just the instruction
874 modifying the memory. It will ensure that it cannot modify
875 itself */
876 env->current_tb = NULL;
877 tb_gen_code(env, current_pc, current_cs_base, current_flags,
878 CF_SINGLE_INSN);
879 cpu_resume_from_signal(env, NULL);
881 #endif
884 /* len must be <= 8 and start must be a multiple of len */
885 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
887 PageDesc *p;
888 int offset, b;
889 #if 0
890 if (1) {
891 if (loglevel) {
892 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
893 cpu_single_env->mem_write_vaddr, len,
894 cpu_single_env->eip,
895 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
898 #endif
899 p = page_find(start >> TARGET_PAGE_BITS);
900 if (!p)
901 return;
902 if (p->code_bitmap) {
903 offset = start & ~TARGET_PAGE_MASK;
904 b = p->code_bitmap[offset >> 3] >> (offset & 7);
905 if (b & ((1 << len) - 1))
906 goto do_invalidate;
907 } else {
908 do_invalidate:
909 tb_invalidate_phys_page_range(start, start + len, 1);
913 #if !defined(CONFIG_SOFTMMU)
914 static void tb_invalidate_phys_page(target_phys_addr_t addr,
915 unsigned long pc, void *puc)
917 int n, current_flags, current_tb_modified;
918 target_ulong current_pc, current_cs_base;
919 PageDesc *p;
920 TranslationBlock *tb, *current_tb;
921 #ifdef TARGET_HAS_PRECISE_SMC
922 CPUState *env = cpu_single_env;
923 #endif
925 addr &= TARGET_PAGE_MASK;
926 p = page_find(addr >> TARGET_PAGE_BITS);
927 if (!p)
928 return;
929 tb = p->first_tb;
930 current_tb_modified = 0;
931 current_tb = NULL;
932 current_pc = 0; /* avoid warning */
933 current_cs_base = 0; /* avoid warning */
934 current_flags = 0; /* avoid warning */
935 #ifdef TARGET_HAS_PRECISE_SMC
936 if (tb && pc != 0) {
937 current_tb = tb_find_pc(pc);
939 #endif
940 while (tb != NULL) {
941 n = (long)tb & 3;
942 tb = (TranslationBlock *)((long)tb & ~3);
943 #ifdef TARGET_HAS_PRECISE_SMC
944 if (current_tb == tb &&
945 !(current_tb->cflags & CF_SINGLE_INSN)) {
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
952 current_tb_modified = 1;
953 cpu_restore_state(current_tb, env, pc, puc);
954 #if defined(TARGET_I386)
955 current_flags = env->hflags;
956 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
957 current_cs_base = (target_ulong)env->segs[R_CS].base;
958 current_pc = current_cs_base + env->eip;
959 #else
960 #error unsupported CPU
961 #endif
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 tb_phys_invalidate(tb, addr);
965 tb = tb->page_next[n];
967 p->first_tb = NULL;
968 #ifdef TARGET_HAS_PRECISE_SMC
969 if (current_tb_modified) {
970 /* we generate a block containing just the instruction
971 modifying the memory. It will ensure that it cannot modify
972 itself */
973 env->current_tb = NULL;
974 tb_gen_code(env, current_pc, current_cs_base, current_flags,
975 CF_SINGLE_INSN);
976 cpu_resume_from_signal(env, puc);
978 #endif
980 #endif
982 /* add the tb in the target page and protect it if necessary */
983 static inline void tb_alloc_page(TranslationBlock *tb,
984 unsigned int n, target_ulong page_addr)
986 PageDesc *p;
987 TranslationBlock *last_first_tb;
989 tb->page_addr[n] = page_addr;
990 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
991 tb->page_next[n] = p->first_tb;
992 last_first_tb = p->first_tb;
993 p->first_tb = (TranslationBlock *)((long)tb | n);
994 invalidate_page_bitmap(p);
996 #if defined(TARGET_HAS_SMC) || 1
998 #if defined(CONFIG_USER_ONLY)
999 if (p->flags & PAGE_WRITE) {
1000 target_ulong addr;
1001 PageDesc *p2;
1002 int prot;
1004 /* force the host page as non writable (writes will have a
1005 page fault + mprotect overhead) */
1006 page_addr &= qemu_host_page_mask;
1007 prot = 0;
1008 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1009 addr += TARGET_PAGE_SIZE) {
1011 p2 = page_find (addr >> TARGET_PAGE_BITS);
1012 if (!p2)
1013 continue;
1014 prot |= p2->flags;
1015 p2->flags &= ~PAGE_WRITE;
1016 page_get_flags(addr);
1018 mprotect(g2h(page_addr), qemu_host_page_size,
1019 (prot & PAGE_BITS) & ~PAGE_WRITE);
1020 #ifdef DEBUG_TB_INVALIDATE
1021 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1022 page_addr);
1023 #endif
1025 #else
1026 /* if some code is already present, then the pages are already
1027 protected. So we handle the case where only the first TB is
1028 allocated in a physical page */
1029 if (!last_first_tb) {
1030 tlb_protect_code(page_addr);
1032 #endif
1034 #endif /* TARGET_HAS_SMC */
1037 /* Allocate a new translation block. Flush the translation buffer if
1038 too many translation blocks or too much generated code. */
1039 TranslationBlock *tb_alloc(target_ulong pc)
1041 TranslationBlock *tb;
1043 if (nb_tbs >= code_gen_max_blocks ||
1044 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1045 return NULL;
1046 tb = &tbs[nb_tbs++];
1047 tb->pc = pc;
1048 tb->cflags = 0;
1049 return tb;
1052 /* add a new TB and link it to the physical page tables. phys_page2 is
1053 (-1) to indicate that only one page contains the TB. */
1054 void tb_link_phys(TranslationBlock *tb,
1055 target_ulong phys_pc, target_ulong phys_page2)
1057 unsigned int h;
1058 TranslationBlock **ptb;
1060 /* Grab the mmap lock to stop another thread invalidating this TB
1061 before we are done. */
1062 mmap_lock();
1063 /* add in the physical hash table */
1064 h = tb_phys_hash_func(phys_pc);
1065 ptb = &tb_phys_hash[h];
1066 tb->phys_hash_next = *ptb;
1067 *ptb = tb;
1069 /* add in the page list */
1070 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1071 if (phys_page2 != -1)
1072 tb_alloc_page(tb, 1, phys_page2);
1073 else
1074 tb->page_addr[1] = -1;
1076 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1077 tb->jmp_next[0] = NULL;
1078 tb->jmp_next[1] = NULL;
1080 /* init original jump addresses */
1081 if (tb->tb_next_offset[0] != 0xffff)
1082 tb_reset_jump(tb, 0);
1083 if (tb->tb_next_offset[1] != 0xffff)
1084 tb_reset_jump(tb, 1);
1086 #ifdef DEBUG_TB_CHECK
1087 tb_page_check();
1088 #endif
1089 mmap_unlock();
1092 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1093 tb[1].tc_ptr. Return NULL if not found */
1094 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1096 int m_min, m_max, m;
1097 unsigned long v;
1098 TranslationBlock *tb;
1100 if (nb_tbs <= 0)
1101 return NULL;
1102 if (tc_ptr < (unsigned long)code_gen_buffer ||
1103 tc_ptr >= (unsigned long)code_gen_ptr)
1104 return NULL;
1105 /* binary search (cf Knuth) */
1106 m_min = 0;
1107 m_max = nb_tbs - 1;
1108 while (m_min <= m_max) {
1109 m = (m_min + m_max) >> 1;
1110 tb = &tbs[m];
1111 v = (unsigned long)tb->tc_ptr;
1112 if (v == tc_ptr)
1113 return tb;
1114 else if (tc_ptr < v) {
1115 m_max = m - 1;
1116 } else {
1117 m_min = m + 1;
1120 return &tbs[m_max];
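/* Editor's note: illustrative sketch only, not part of the original file.
   tb_find_pc() relies on tbs[] being sorted by tc_ptr (translated code is
   emitted sequentially into code_gen_buffer) and returns the last TB whose
   tc_ptr is <= the searched host PC.  The same search over a plain array: */
#if 0 /* example only */
static int find_last_le(const unsigned long *v, int n, unsigned long key)
{
    int lo = 0, hi = n - 1, m;
    while (lo <= hi) {
        m = (lo + hi) >> 1;
        if (v[m] == key)
            return m;
        if (key < v[m])
            hi = m - 1;
        else
            lo = m + 1;
    }
    return hi;   /* index of the last element <= key, or -1 if none */
}
#endif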
1123 static void tb_reset_jump_recursive(TranslationBlock *tb);
1125 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1127 TranslationBlock *tb1, *tb_next, **ptb;
1128 unsigned int n1;
1130 tb1 = tb->jmp_next[n];
1131 if (tb1 != NULL) {
1132 /* find head of list */
1133 for(;;) {
1134 n1 = (long)tb1 & 3;
1135 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1136 if (n1 == 2)
1137 break;
1138 tb1 = tb1->jmp_next[n1];
1141 /* we are now sure that tb jumps to tb1 */
1141 tb_next = tb1;
1143 /* remove tb from the jmp_first list */
1144 ptb = &tb_next->jmp_first;
1145 for(;;) {
1146 tb1 = *ptb;
1147 n1 = (long)tb1 & 3;
1148 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1149 if (n1 == n && tb1 == tb)
1150 break;
1151 ptb = &tb1->jmp_next[n1];
1153 *ptb = tb->jmp_next[n];
1154 tb->jmp_next[n] = NULL;
1156 /* suppress the jump to next tb in generated code */
1157 tb_reset_jump(tb, n);
1159 /* suppress jumps in the tb on which we could have jumped */
1160 tb_reset_jump_recursive(tb_next);
1164 static void tb_reset_jump_recursive(TranslationBlock *tb)
1166 tb_reset_jump_recursive2(tb, 0);
1167 tb_reset_jump_recursive2(tb, 1);
1170 #if defined(TARGET_HAS_ICE)
1171 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1173 target_phys_addr_t addr;
1174 target_ulong pd;
1175 ram_addr_t ram_addr;
1176 PhysPageDesc *p;
1178 addr = cpu_get_phys_page_debug(env, pc);
1179 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1180 if (!p) {
1181 pd = IO_MEM_UNASSIGNED;
1182 } else {
1183 pd = p->phys_offset;
1185 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1186 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1188 #endif
1190 /* Add a watchpoint. */
1191 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1193 int i;
1195 for (i = 0; i < env->nb_watchpoints; i++) {
1196 if (addr == env->watchpoint[i].vaddr)
1197 return 0;
1199 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1200 return -1;
1202 i = env->nb_watchpoints++;
1203 env->watchpoint[i].vaddr = addr;
1204 tlb_flush_page(env, addr);
1205 /* FIXME: This flush is needed because of the hack to make memory ops
1206 terminate the TB. It can be removed once the proper IO trap and
1207 re-execute bits are in. */
1208 tb_flush(env);
1209 return i;
1212 /* Remove a watchpoint. */
1213 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1215 int i;
1217 for (i = 0; i < env->nb_watchpoints; i++) {
1218 if (addr == env->watchpoint[i].vaddr) {
1219 env->nb_watchpoints--;
1220 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1221 tlb_flush_page(env, addr);
1222 return 0;
1225 return -1;
1228 /* Remove all watchpoints. */
1229 void cpu_watchpoint_remove_all(CPUState *env) {
1230 int i;
1232 for (i = 0; i < env->nb_watchpoints; i++) {
1233 tlb_flush_page(env, env->watchpoint[i].vaddr);
1235 env->nb_watchpoints = 0;
1238 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1239 breakpoint is reached */
1240 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1242 #if defined(TARGET_HAS_ICE)
1243 int i;
1245 for(i = 0; i < env->nb_breakpoints; i++) {
1246 if (env->breakpoints[i] == pc)
1247 return 0;
1250 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1251 return -1;
1252 env->breakpoints[env->nb_breakpoints++] = pc;
1254 breakpoint_invalidate(env, pc);
1255 return 0;
1256 #else
1257 return -1;
1258 #endif
1261 /* remove all breakpoints */
1262 void cpu_breakpoint_remove_all(CPUState *env) {
1263 #if defined(TARGET_HAS_ICE)
1264 int i;
1265 for(i = 0; i < env->nb_breakpoints; i++) {
1266 breakpoint_invalidate(env, env->breakpoints[i]);
1268 env->nb_breakpoints = 0;
1269 #endif
1272 /* remove a breakpoint */
1273 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1275 #if defined(TARGET_HAS_ICE)
1276 int i;
1277 for(i = 0; i < env->nb_breakpoints; i++) {
1278 if (env->breakpoints[i] == pc)
1279 goto found;
1281 return -1;
1282 found:
1283 env->nb_breakpoints--;
1284 if (i < env->nb_breakpoints)
1285 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1287 breakpoint_invalidate(env, pc);
1288 return 0;
1289 #else
1290 return -1;
1291 #endif
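/* Editor's note: illustrative usage sketch only, not part of the original
   file.  cpu_breakpoint_insert() returns 0 on success (or if the breakpoint
   already exists) and -1 when MAX_BREAKPOINTS is reached or the target has
   no ICE support; cpu_breakpoint_remove() returns -1 if the PC is unknown. */
#if 0 /* example only */
static void breakpoint_usage_example(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) < 0)
        fprintf(stderr, "could not set breakpoint\n");
    /* ... run until the CPU loop returns EXCP_DEBUG ... */
    cpu_breakpoint_remove(env, pc);
}
#endif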
1294 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1295 CPU loop after each instruction */
1296 void cpu_single_step(CPUState *env, int enabled)
1298 #if defined(TARGET_HAS_ICE)
1299 if (env->singlestep_enabled != enabled) {
1300 env->singlestep_enabled = enabled;
1301 /* must flush all the translated code to avoid inconsistencies */
1302 /* XXX: only flush what is necessary */
1303 tb_flush(env);
1305 #endif
1308 /* enable or disable low level logging */
1309 void cpu_set_log(int log_flags)
1311 loglevel = log_flags;
1312 if (loglevel && !logfile) {
1313 logfile = fopen(logfilename, log_append ? "a" : "w");
1314 if (!logfile) {
1315 perror(logfilename);
1316 _exit(1);
1318 #if !defined(CONFIG_SOFTMMU)
1319 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1321 static uint8_t logfile_buf[4096];
1322 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1324 #else
1325 setvbuf(logfile, NULL, _IOLBF, 0);
1326 #endif
1327 log_append = 1;
1329 if (!loglevel && logfile) {
1330 fclose(logfile);
1331 logfile = NULL;
1335 void cpu_set_log_filename(const char *filename)
1337 logfilename = strdup(filename);
1338 if (logfile) {
1339 fclose(logfile);
1340 logfile = NULL;
1342 cpu_set_log(loglevel);
1345 /* mask must never be zero, except for A20 change call */
1346 void cpu_interrupt(CPUState *env, int mask)
1348 #if !defined(USE_NPTL)
1349 TranslationBlock *tb;
1350 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1351 #endif
1353 /* FIXME: This is probably not thread safe. A different thread could
1354 be in the middle of a read-modify-write operation. */
1355 env->interrupt_request |= mask;
1356 #if defined(USE_NPTL)
1357 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1358 problem and hope the cpu will stop of its own accord. For userspace
1359 emulation this often isn't actually as bad as it sounds. Often
1360 signals are used primarily to interrupt blocking syscalls. */
1361 #else
1362 /* if the cpu is currently executing code, we must unlink it and
1363 all the potentially executing TB */
1364 tb = env->current_tb;
1365 if (tb && !testandset(&interrupt_lock)) {
1366 env->current_tb = NULL;
1367 tb_reset_jump_recursive(tb);
1368 resetlock(&interrupt_lock);
1370 #endif
1373 void cpu_reset_interrupt(CPUState *env, int mask)
1375 env->interrupt_request &= ~mask;
1378 CPULogItem cpu_log_items[] = {
1379 { CPU_LOG_TB_OUT_ASM, "out_asm",
1380 "show generated host assembly code for each compiled TB" },
1381 { CPU_LOG_TB_IN_ASM, "in_asm",
1382 "show target assembly code for each compiled TB" },
1383 { CPU_LOG_TB_OP, "op",
1384 "show micro ops for each compiled TB" },
1385 { CPU_LOG_TB_OP_OPT, "op_opt",
1386 "show micro ops "
1387 #ifdef TARGET_I386
1388 "before eflags optimization and "
1389 #endif
1390 "after liveness analysis" },
1391 { CPU_LOG_INT, "int",
1392 "show interrupts/exceptions in short format" },
1393 { CPU_LOG_EXEC, "exec",
1394 "show trace before each executed TB (lots of logs)" },
1395 { CPU_LOG_TB_CPU, "cpu",
1396 "show CPU state before block translation" },
1397 #ifdef TARGET_I386
1398 { CPU_LOG_PCALL, "pcall",
1399 "show protected mode far calls/returns/exceptions" },
1400 #endif
1401 #ifdef DEBUG_IOPORT
1402 { CPU_LOG_IOPORT, "ioport",
1403 "show all i/o ports accesses" },
1404 #endif
1405 { 0, NULL, NULL },
1408 static int cmp1(const char *s1, int n, const char *s2)
1410 if (strlen(s2) != n)
1411 return 0;
1412 return memcmp(s1, s2, n) == 0;
1415 /* takes a comma separated list of log masks. Returns 0 on error. */
1416 int cpu_str_to_log_mask(const char *str)
1418 CPULogItem *item;
1419 int mask;
1420 const char *p, *p1;
1422 p = str;
1423 mask = 0;
1424 for(;;) {
1425 p1 = strchr(p, ',');
1426 if (!p1)
1427 p1 = p + strlen(p);
1428 if(cmp1(p,p1-p,"all")) {
1429 for(item = cpu_log_items; item->mask != 0; item++) {
1430 mask |= item->mask;
1432 } else {
1433 for(item = cpu_log_items; item->mask != 0; item++) {
1434 if (cmp1(p, p1 - p, item->name))
1435 goto found;
1437 return 0;
1439 found:
1440 mask |= item->mask;
1441 if (*p1 != ',')
1442 break;
1443 p = p1 + 1;
1445 return mask;
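/* Editor's note: illustrative usage sketch only, not part of the original
   file.  cpu_str_to_log_mask() accepts a comma separated list of the item
   names declared in cpu_log_items (or "all") and returns 0 on an unknown
   name, so callers can report bad input before enabling logging. */
#if 0 /* example only */
static void log_mask_usage_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,cpu");
    if (mask == 0)
        fprintf(stderr, "unknown log item\n");
    else
        cpu_set_log(mask);
}
#endif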
1448 void cpu_abort(CPUState *env, const char *fmt, ...)
1450 va_list ap;
1451 va_list ap2;
1453 va_start(ap, fmt);
1454 va_copy(ap2, ap);
1455 fprintf(stderr, "qemu: fatal: ");
1456 vfprintf(stderr, fmt, ap);
1457 fprintf(stderr, "\n");
1458 #ifdef TARGET_I386
1459 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1460 #else
1461 cpu_dump_state(env, stderr, fprintf, 0);
1462 #endif
1463 if (logfile) {
1464 fprintf(logfile, "qemu: fatal: ");
1465 vfprintf(logfile, fmt, ap2);
1466 fprintf(logfile, "\n");
1467 #ifdef TARGET_I386
1468 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1469 #else
1470 cpu_dump_state(env, logfile, fprintf, 0);
1471 #endif
1472 fflush(logfile);
1473 fclose(logfile);
1475 va_end(ap2);
1476 va_end(ap);
1477 abort();
1480 CPUState *cpu_copy(CPUState *env)
1482 CPUState *new_env = cpu_init(env->cpu_model_str);
1483 /* preserve chaining and index */
1484 CPUState *next_cpu = new_env->next_cpu;
1485 int cpu_index = new_env->cpu_index;
1486 memcpy(new_env, env, sizeof(CPUState));
1487 new_env->next_cpu = next_cpu;
1488 new_env->cpu_index = cpu_index;
1489 return new_env;
1492 #if !defined(CONFIG_USER_ONLY)
1494 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1496 unsigned int i;
1498 /* Discard jump cache entries for any tb which might potentially
1499 overlap the flushed page. */
1500 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1501 memset (&env->tb_jmp_cache[i], 0,
1502 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1504 i = tb_jmp_cache_hash_page(addr);
1505 memset (&env->tb_jmp_cache[i], 0,
1506 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1509 /* NOTE: if flush_global is true, also flush global entries (not
1510 implemented yet) */
1511 void tlb_flush(CPUState *env, int flush_global)
1513 int i;
1515 #if defined(DEBUG_TLB)
1516 printf("tlb_flush:\n");
1517 #endif
1518 /* must reset current TB so that interrupts cannot modify the
1519 links while we are modifying them */
1520 env->current_tb = NULL;
1522 for(i = 0; i < CPU_TLB_SIZE; i++) {
1523 env->tlb_table[0][i].addr_read = -1;
1524 env->tlb_table[0][i].addr_write = -1;
1525 env->tlb_table[0][i].addr_code = -1;
1526 env->tlb_table[1][i].addr_read = -1;
1527 env->tlb_table[1][i].addr_write = -1;
1528 env->tlb_table[1][i].addr_code = -1;
1529 #if (NB_MMU_MODES >= 3)
1530 env->tlb_table[2][i].addr_read = -1;
1531 env->tlb_table[2][i].addr_write = -1;
1532 env->tlb_table[2][i].addr_code = -1;
1533 #if (NB_MMU_MODES == 4)
1534 env->tlb_table[3][i].addr_read = -1;
1535 env->tlb_table[3][i].addr_write = -1;
1536 env->tlb_table[3][i].addr_code = -1;
1537 #endif
1538 #endif
1541 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1543 #ifdef USE_KQEMU
1544 if (env->kqemu_enabled) {
1545 kqemu_flush(env, flush_global);
1547 #endif
1548 tlb_flush_count++;
1551 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1553 if (addr == (tlb_entry->addr_read &
1554 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1555 addr == (tlb_entry->addr_write &
1556 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1557 addr == (tlb_entry->addr_code &
1558 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1559 tlb_entry->addr_read = -1;
1560 tlb_entry->addr_write = -1;
1561 tlb_entry->addr_code = -1;
1565 void tlb_flush_page(CPUState *env, target_ulong addr)
1567 int i;
1569 #if defined(DEBUG_TLB)
1570 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1571 #endif
1572 /* must reset current TB so that interrupts cannot modify the
1573 links while we are modifying them */
1574 env->current_tb = NULL;
1576 addr &= TARGET_PAGE_MASK;
1577 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1578 tlb_flush_entry(&env->tlb_table[0][i], addr);
1579 tlb_flush_entry(&env->tlb_table[1][i], addr);
1580 #if (NB_MMU_MODES >= 3)
1581 tlb_flush_entry(&env->tlb_table[2][i], addr);
1582 #if (NB_MMU_MODES == 4)
1583 tlb_flush_entry(&env->tlb_table[3][i], addr);
1584 #endif
1585 #endif
1587 tlb_flush_jmp_cache(env, addr);
1589 #ifdef USE_KQEMU
1590 if (env->kqemu_enabled) {
1591 kqemu_flush_page(env, addr);
1593 #endif
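/* Editor's note: illustrative sketch only, not part of the original file.
   A TLB slot is selected by the low bits of the virtual page number, so
   flushing one page only has to check a single index per MMU mode.  With
   4 KiB target pages and a 256-entry TLB (assumed values), address
   0x00403123 maps to slot (0x00403123 >> 12) & 0xff == 0x03. */
#if 0 /* example only */
static void tlb_index_example(void)
{
    unsigned long addr = 0x00403123;
    int index = (addr >> 12) & (256 - 1);   /* index == 0x03 */
}
#endif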
1596 /* update the TLBs so that writes to code in the virtual page 'addr'
1597 can be detected */
1598 static void tlb_protect_code(ram_addr_t ram_addr)
1600 cpu_physical_memory_reset_dirty(ram_addr,
1601 ram_addr + TARGET_PAGE_SIZE,
1602 CODE_DIRTY_FLAG);
1605 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1606 tested for self modifying code */
1607 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1608 target_ulong vaddr)
1610 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1613 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1614 unsigned long start, unsigned long length)
1616 unsigned long addr;
1617 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1618 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1619 if ((addr - start) < length) {
1620 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1625 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1626 int dirty_flags)
1628 CPUState *env;
1629 unsigned long length, start1;
1630 int i, mask, len;
1631 uint8_t *p;
1633 start &= TARGET_PAGE_MASK;
1634 end = TARGET_PAGE_ALIGN(end);
1636 length = end - start;
1637 if (length == 0)
1638 return;
1639 len = length >> TARGET_PAGE_BITS;
1640 #ifdef USE_KQEMU
1641 /* XXX: should not depend on cpu context */
1642 env = first_cpu;
1643 if (env->kqemu_enabled) {
1644 ram_addr_t addr;
1645 addr = start;
1646 for(i = 0; i < len; i++) {
1647 kqemu_set_notdirty(env, addr);
1648 addr += TARGET_PAGE_SIZE;
1651 #endif
1652 mask = ~dirty_flags;
1653 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1654 for(i = 0; i < len; i++)
1655 p[i] &= mask;
1657 /* we modify the TLB cache so that the dirty bit will be set again
1658 when accessing the range */
1659 start1 = start + (unsigned long)phys_ram_base;
1660 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1661 for(i = 0; i < CPU_TLB_SIZE; i++)
1662 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1663 for(i = 0; i < CPU_TLB_SIZE; i++)
1664 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1665 #if (NB_MMU_MODES >= 3)
1666 for(i = 0; i < CPU_TLB_SIZE; i++)
1667 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1668 #if (NB_MMU_MODES == 4)
1669 for(i = 0; i < CPU_TLB_SIZE; i++)
1670 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1671 #endif
1672 #endif
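/* Editor's note: illustrative sketch only, not part of the original file.
   phys_ram_dirty keeps one byte of dirty flags per guest RAM page; the
   function above clears a particular flag over a range by AND-ing each
   byte with the complement of the flag, then patches the TLBs so the next
   write goes through the slow NOTDIRTY path and can set the flag again. */
#if 0 /* example only */
static void dirty_flag_clear_example(uint8_t *dirty, int pages, int flag)
{
    int i, mask = ~flag;
    for (i = 0; i < pages; i++)
        dirty[i] &= mask;
}
#endif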
1676 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1678 ram_addr_t ram_addr;
1680 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1681 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1682 tlb_entry->addend - (unsigned long)phys_ram_base;
1683 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1684 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1689 /* update the TLB according to the current state of the dirty bits */
1690 void cpu_tlb_update_dirty(CPUState *env)
1692 int i;
1693 for(i = 0; i < CPU_TLB_SIZE; i++)
1694 tlb_update_dirty(&env->tlb_table[0][i]);
1695 for(i = 0; i < CPU_TLB_SIZE; i++)
1696 tlb_update_dirty(&env->tlb_table[1][i]);
1697 #if (NB_MMU_MODES >= 3)
1698 for(i = 0; i < CPU_TLB_SIZE; i++)
1699 tlb_update_dirty(&env->tlb_table[2][i]);
1700 #if (NB_MMU_MODES == 4)
1701 for(i = 0; i < CPU_TLB_SIZE; i++)
1702 tlb_update_dirty(&env->tlb_table[3][i]);
1703 #endif
1704 #endif
1707 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1708 unsigned long start)
1710 unsigned long addr;
1711 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1712 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1713 if (addr == start) {
1714 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1719 /* update the TLB corresponding to virtual page vaddr and phys addr
1720 addr so that it is no longer dirty */
1721 static inline void tlb_set_dirty(CPUState *env,
1722 unsigned long addr, target_ulong vaddr)
1724 int i;
1726 addr &= TARGET_PAGE_MASK;
1727 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1728 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1729 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1730 #if (NB_MMU_MODES >= 3)
1731 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1732 #if (NB_MMU_MODES == 4)
1733 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1734 #endif
1735 #endif
1738 /* add a new TLB entry. At most one entry for a given virtual address
1739 is permitted. Return 0 if OK or 2 if the page could not be mapped
1740 (can only happen in non SOFTMMU mode for I/O pages or pages
1741 conflicting with the host address space). */
1742 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1743 target_phys_addr_t paddr, int prot,
1744 int mmu_idx, int is_softmmu)
1746 PhysPageDesc *p;
1747 unsigned long pd;
1748 unsigned int index;
1749 target_ulong address;
1750 target_phys_addr_t addend;
1751 int ret;
1752 CPUTLBEntry *te;
1753 int i;
1755 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1756 if (!p) {
1757 pd = IO_MEM_UNASSIGNED;
1758 } else {
1759 pd = p->phys_offset;
1761 #if defined(DEBUG_TLB)
1762 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1763 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1764 #endif
1766 ret = 0;
1768 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1769 /* IO memory case */
1770 address = vaddr | pd;
1771 addend = paddr;
1772 } else {
1773 /* standard memory */
1774 address = vaddr;
1775 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1778 /* Make accesses to pages with watchpoints go via the
1779 watchpoint trap routines. */
1780 for (i = 0; i < env->nb_watchpoints; i++) {
1781 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1782 if (address & ~TARGET_PAGE_MASK) {
1783 env->watchpoint[i].addend = 0;
1784 address = vaddr | io_mem_watch;
1785 } else {
1786 env->watchpoint[i].addend = pd - paddr +
1787 (unsigned long) phys_ram_base;
1788 /* TODO: Figure out how to make read watchpoints coexist
1789 with code. */
1790 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1795 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1796 addend -= vaddr;
1797 te = &env->tlb_table[mmu_idx][index];
1798 te->addend = addend;
1799 if (prot & PAGE_READ) {
1800 te->addr_read = address;
1801 } else {
1802 te->addr_read = -1;
1805 if (prot & PAGE_EXEC) {
1806 te->addr_code = address;
1807 } else {
1808 te->addr_code = -1;
1810 if (prot & PAGE_WRITE) {
1811 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1812 (pd & IO_MEM_ROMD)) {
1813 /* write access calls the I/O callback */
1814 te->addr_write = vaddr |
1815 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1816 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1817 !cpu_physical_memory_is_dirty(pd)) {
1818 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1819 } else {
1820 te->addr_write = address;
1822 } else {
1823 te->addr_write = -1;
1826 return ret;
1829 #else
1831 void tlb_flush(CPUState *env, int flush_global)
1835 void tlb_flush_page(CPUState *env, target_ulong addr)
1839 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1840 target_phys_addr_t paddr, int prot,
1841 int mmu_idx, int is_softmmu)
1843 return 0;
1846 /* dump memory mappings */
1847 void page_dump(FILE *f)
1849 unsigned long start, end;
1850 int i, j, prot, prot1;
1851 PageDesc *p;
1853 fprintf(f, "%-8s %-8s %-8s %s\n",
1854 "start", "end", "size", "prot");
1855 start = -1;
1856 end = -1;
1857 prot = 0;
1858 for(i = 0; i <= L1_SIZE; i++) {
1859 if (i < L1_SIZE)
1860 p = l1_map[i];
1861 else
1862 p = NULL;
1863 for(j = 0;j < L2_SIZE; j++) {
1864 if (!p)
1865 prot1 = 0;
1866 else
1867 prot1 = p[j].flags;
1868 if (prot1 != prot) {
1869 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1870 if (start != -1) {
1871 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1872 start, end, end - start,
1873 prot & PAGE_READ ? 'r' : '-',
1874 prot & PAGE_WRITE ? 'w' : '-',
1875 prot & PAGE_EXEC ? 'x' : '-');
1877 if (prot1 != 0)
1878 start = end;
1879 else
1880 start = -1;
1881 prot = prot1;
1883 if (!p)
1884 break;
1889 int page_get_flags(target_ulong address)
1891 PageDesc *p;
1893 p = page_find(address >> TARGET_PAGE_BITS);
1894 if (!p)
1895 return 0;
1896 return p->flags;
1899 /* modify the flags of a page and invalidate the code if
1900 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1901 depending on PAGE_WRITE */
1902 void page_set_flags(target_ulong start, target_ulong end, int flags)
1904 PageDesc *p;
1905 target_ulong addr;
1907 /* mmap_lock should already be held. */
1908 start = start & TARGET_PAGE_MASK;
1909 end = TARGET_PAGE_ALIGN(end);
1910 if (flags & PAGE_WRITE)
1911 flags |= PAGE_WRITE_ORG;
1912 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1913 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1914 /* if the write protection is set, then we invalidate the code
1915 inside */
1916 if (!(p->flags & PAGE_WRITE) &&
1917 (flags & PAGE_WRITE) &&
1918 p->first_tb) {
1919 tb_invalidate_phys_page(addr, 0, NULL);
1921 p->flags = flags;
1925 int page_check_range(target_ulong start, target_ulong len, int flags)
1927 PageDesc *p;
1928 target_ulong end;
1929 target_ulong addr;
1931 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1932 start = start & TARGET_PAGE_MASK;
1934 if( end < start )
1935 /* we've wrapped around */
1936 return -1;
1937 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1938 p = page_find(addr >> TARGET_PAGE_BITS);
1939 if( !p )
1940 return -1;
1941 if( !(p->flags & PAGE_VALID) )
1942 return -1;
1944 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1945 return -1;
1946 if (flags & PAGE_WRITE) {
1947 if (!(p->flags & PAGE_WRITE_ORG))
1948 return -1;
1949 /* unprotect the page if it was put read-only because it
1950 contains translated code */
1951 if (!(p->flags & PAGE_WRITE)) {
1952 if (!page_unprotect(addr, 0, NULL))
1953 return -1;
1955 return 0;
1958 return 0;
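/* Editor's note: illustrative usage sketch only, not part of the original
   file.  In user mode emulation, page_check_range() (which returns 0 on
   success and -1 on failure) can verify a guest buffer before it is
   accessed; it also re-enables writing to pages that were write-protected
   because they contain translated code. */
#if 0 /* example only */
static int guest_buffer_accessible(target_ulong guest_addr, target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) == 0;
}
#endif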
1961 /* called from signal handler: invalidate the code and unprotect the
1962 page. Return TRUE if the fault was successfully handled. */
1963 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1965 unsigned int page_index, prot, pindex;
1966 PageDesc *p, *p1;
1967 target_ulong host_start, host_end, addr;
1969 /* Technically this isn't safe inside a signal handler. However we
1970 know this only ever happens in a synchronous SEGV handler, so in
1971 practice it seems to be ok. */
1972 mmap_lock();
1974 host_start = address & qemu_host_page_mask;
1975 page_index = host_start >> TARGET_PAGE_BITS;
1976 p1 = page_find(page_index);
1977 if (!p1) {
1978 mmap_unlock();
1979 return 0;
1981 host_end = host_start + qemu_host_page_size;
1982 p = p1;
1983 prot = 0;
1984 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1985 prot |= p->flags;
1986 p++;
1988 /* if the page was really writable, then we change its
1989 protection back to writable */
1990 if (prot & PAGE_WRITE_ORG) {
1991 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1992 if (!(p1[pindex].flags & PAGE_WRITE)) {
1993 mprotect((void *)g2h(host_start), qemu_host_page_size,
1994 (prot & PAGE_BITS) | PAGE_WRITE);
1995 p1[pindex].flags |= PAGE_WRITE;
1996 /* and since the content will be modified, we must invalidate
1997 the corresponding translated code. */
1998 tb_invalidate_phys_page(address, pc, puc);
1999 #ifdef DEBUG_TB_CHECK
2000 tb_invalidate_check(address);
2001 #endif
2002 mmap_unlock();
2003 return 1;
2006 mmap_unlock();
2007 return 0;
2010 static inline void tlb_set_dirty(CPUState *env,
2011 unsigned long addr, target_ulong vaddr)
2014 #endif /* defined(CONFIG_USER_ONLY) */
2016 #if !defined(CONFIG_USER_ONLY)
2017 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2018 ram_addr_t memory);
2019 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2020 ram_addr_t orig_memory);
2021 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2022 need_subpage) \
2023 do { \
2024 if (addr > start_addr) \
2025 start_addr2 = 0; \
2026 else { \
2027 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2028 if (start_addr2 > 0) \
2029 need_subpage = 1; \
2032 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2033 end_addr2 = TARGET_PAGE_SIZE - 1; \
2034 else { \
2035 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2036 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2037 need_subpage = 1; \
2039 } while (0)
2041 /* register physical memory. 'size' must be a multiple of the target
2042 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2043 io memory page */
2044 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2045 ram_addr_t size,
2046 ram_addr_t phys_offset)
2048 target_phys_addr_t addr, end_addr;
2049 PhysPageDesc *p;
2050 CPUState *env;
2051 ram_addr_t orig_size = size;
2052 void *subpage;
2054 #ifdef USE_KQEMU
2055 /* XXX: should not depend on cpu context */
2056 env = first_cpu;
2057 if (env->kqemu_enabled) {
2058 kqemu_set_phys_mem(start_addr, size, phys_offset);
2060 #endif
2061 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2062 end_addr = start_addr + (target_phys_addr_t)size;
2063 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2064 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2065 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2066 ram_addr_t orig_memory = p->phys_offset;
2067 target_phys_addr_t start_addr2, end_addr2;
2068 int need_subpage = 0;
2070 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2071 need_subpage);
2072 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2073 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2074 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2075 &p->phys_offset, orig_memory);
2076 } else {
2077 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2078 >> IO_MEM_SHIFT];
2080 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2081 } else {
2082 p->phys_offset = phys_offset;
2083 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2084 (phys_offset & IO_MEM_ROMD))
2085 phys_offset += TARGET_PAGE_SIZE;
2087 } else {
2088 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2089 p->phys_offset = phys_offset;
2090 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2091 (phys_offset & IO_MEM_ROMD))
2092 phys_offset += TARGET_PAGE_SIZE;
2093 else {
2094 target_phys_addr_t start_addr2, end_addr2;
2095 int need_subpage = 0;
2097 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2098 end_addr2, need_subpage);
2100 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2101 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2102 &p->phys_offset, IO_MEM_UNASSIGNED);
2103 subpage_register(subpage, start_addr2, end_addr2,
2104 phys_offset);
2110 /* since each CPU stores ram addresses in its TLB cache, we must
2111 reset the modified entries */
2112 /* XXX: slow ! */
2113 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2114 tlb_flush(env, 1);
2118 /* XXX: temporary until new memory mapping API */
2119 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2121 PhysPageDesc *p;
2123 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2124 if (!p)
2125 return IO_MEM_UNASSIGNED;
2126 return p->phys_offset;
2129 /* XXX: better than nothing */
2130 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2132 ram_addr_t addr;
2133 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2134 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2135 (uint64_t)size, (uint64_t)phys_ram_size);
2136 abort();
2138 addr = phys_ram_alloc_offset;
2139 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2140 return addr;
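/* A minimal usage sketch (the helper name, 16 MB size and base address are
   invented for illustration): a machine model allocates guest RAM from the
   global pool and then maps it at a guest physical address. */
static void example_register_main_ram(void)
{
    ram_addr_t ram_offset;

    /* carve 16 MB out of phys_ram_base */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    /* map it at guest physical address 0; the low bits of the offset are
       zero, so the page is plain RAM, not an I/O page */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024, ram_offset);
}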
2143 void qemu_ram_free(ram_addr_t addr)
2147 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2149 #ifdef DEBUG_UNASSIGNED
2150 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2151 #endif
2152 #ifdef TARGET_SPARC
2153 do_unassigned_access(addr, 0, 0, 0);
2154 #elif defined(TARGET_CRIS)
2155 do_unassigned_access(addr, 0, 0, 0);
2156 #endif
2157 return 0;
2160 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2162 #ifdef DEBUG_UNASSIGNED
2163 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2164 #endif
2165 #ifdef TARGET_SPARC
2166 do_unassigned_access(addr, 1, 0, 0);
2167 #elif defined(TARGET_CRIS)
2168 do_unassigned_access(addr, 1, 0, 0);
2169 #endif
2172 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2173 unassigned_mem_readb,
2174 unassigned_mem_readb,
2175 unassigned_mem_readb,
2178 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2179 unassigned_mem_writeb,
2180 unassigned_mem_writeb,
2181 unassigned_mem_writeb,
2184 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2186 unsigned long ram_addr;
2187 int dirty_flags;
2188 ram_addr = addr - (unsigned long)phys_ram_base;
2189 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2190 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2191 #if !defined(CONFIG_USER_ONLY)
2192 tb_invalidate_phys_page_fast(ram_addr, 1);
2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2194 #endif
2196 stb_p((uint8_t *)(long)addr, val);
2197 #ifdef USE_KQEMU
2198 if (cpu_single_env->kqemu_enabled &&
2199 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2200 kqemu_modify_page(cpu_single_env, ram_addr);
2201 #endif
2202 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2203 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2204 /* we remove the notdirty callback only if the code has been
2205 flushed */
2206 if (dirty_flags == 0xff)
2207 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2210 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2212 unsigned long ram_addr;
2213 int dirty_flags;
2214 ram_addr = addr - (unsigned long)phys_ram_base;
2215 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2216 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2217 #if !defined(CONFIG_USER_ONLY)
2218 tb_invalidate_phys_page_fast(ram_addr, 2);
2219 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2220 #endif
2222 stw_p((uint8_t *)(long)addr, val);
2223 #ifdef USE_KQEMU
2224 if (cpu_single_env->kqemu_enabled &&
2225 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2226 kqemu_modify_page(cpu_single_env, ram_addr);
2227 #endif
2228 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2229 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2230 /* we remove the notdirty callback only if the code has been
2231 flushed */
2232 if (dirty_flags == 0xff)
2233 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2236 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2238 unsigned long ram_addr;
2239 int dirty_flags;
2240 ram_addr = addr - (unsigned long)phys_ram_base;
2241 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2242 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2243 #if !defined(CONFIG_USER_ONLY)
2244 tb_invalidate_phys_page_fast(ram_addr, 4);
2245 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2246 #endif
2248 stl_p((uint8_t *)(long)addr, val);
2249 #ifdef USE_KQEMU
2250 if (cpu_single_env->kqemu_enabled &&
2251 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2252 kqemu_modify_page(cpu_single_env, ram_addr);
2253 #endif
2254 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2255 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2256 /* we remove the notdirty callback only if the code has been
2257 flushed */
2258 if (dirty_flags == 0xff)
2259 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
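/* Illustrative consumer of the dirty bitmap maintained above (this sketch
   assumes the cpu_physical_memory_get_dirty()/..._reset_dirty() helpers and
   VGA_DIRTY_FLAG from cpu-all.h; fb_offset and fb_size are hypothetical):
   a display device only redraws pages whose dirty bit was set since the
   last scan, then clears the bits for the next frame. */
static void example_scan_framebuffer(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_offset; addr < fb_offset + fb_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* a real device would redraw the page at 'addr' here */
        }
    }
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}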
2262 static CPUReadMemoryFunc *error_mem_read[3] = {
2263 NULL, /* never used */
2264 NULL, /* never used */
2265 NULL, /* never used */
2268 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2269 notdirty_mem_writeb,
2270 notdirty_mem_writew,
2271 notdirty_mem_writel,
2274 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2275 so these check for a hit then pass through to the normal out-of-line
2276 phys routines. */
2277 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2279 return ldub_phys(addr);
2282 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2284 return lduw_phys(addr);
2287 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2289 return ldl_phys(addr);
2292 /* Generate a debug exception if a watchpoint has been hit.
2293 Returns the real physical address of the access. addr will be a host
2294 address in case of a RAM location. */
2295 static target_ulong check_watchpoint(target_phys_addr_t addr)
2297 CPUState *env = cpu_single_env;
2298 target_ulong watch;
2299 target_ulong retaddr;
2300 int i;
2302 retaddr = addr;
2303 for (i = 0; i < env->nb_watchpoints; i++) {
2304 watch = env->watchpoint[i].vaddr;
2305 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2306 retaddr = addr - env->watchpoint[i].addend;
2307 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2308 cpu_single_env->watchpoint_hit = i + 1;
2309 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2310 break;
2314 return retaddr;
2317 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2318 uint32_t val)
2320 addr = check_watchpoint(addr);
2321 stb_phys(addr, val);
2324 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2325 uint32_t val)
2327 addr = check_watchpoint(addr);
2328 stw_phys(addr, val);
2331 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2332 uint32_t val)
2334 addr = check_watchpoint(addr);
2335 stl_phys(addr, val);
2338 static CPUReadMemoryFunc *watch_mem_read[3] = {
2339 watch_mem_readb,
2340 watch_mem_readw,
2341 watch_mem_readl,
2344 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2345 watch_mem_writeb,
2346 watch_mem_writew,
2347 watch_mem_writel,
2350 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2351 unsigned int len)
2353 uint32_t ret;
2354 unsigned int idx;
2356 idx = SUBPAGE_IDX(addr - mmio->base);
2357 #if defined(DEBUG_SUBPAGE)
2358 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2359 mmio, len, addr, idx);
2360 #endif
2361 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2363 return ret;
2366 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2367 uint32_t value, unsigned int len)
2369 unsigned int idx;
2371 idx = SUBPAGE_IDX(addr - mmio->base);
2372 #if defined(DEBUG_SUBPAGE)
2373 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2374 mmio, len, addr, idx, value);
2375 #endif
2376 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2379 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2381 #if defined(DEBUG_SUBPAGE)
2382 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2383 #endif
2385 return subpage_readlen(opaque, addr, 0);
2388 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2389 uint32_t value)
2391 #if defined(DEBUG_SUBPAGE)
2392 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2393 #endif
2394 subpage_writelen(opaque, addr, value, 0);
2397 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2399 #if defined(DEBUG_SUBPAGE)
2400 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2401 #endif
2403 return subpage_readlen(opaque, addr, 1);
2406 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2407 uint32_t value)
2409 #if defined(DEBUG_SUBPAGE)
2410 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2411 #endif
2412 subpage_writelen(opaque, addr, value, 1);
2415 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2417 #if defined(DEBUG_SUBPAGE)
2418 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2419 #endif
2421 return subpage_readlen(opaque, addr, 2);
2424 static void subpage_writel (void *opaque,
2425 target_phys_addr_t addr, uint32_t value)
2427 #if defined(DEBUG_SUBPAGE)
2428 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2429 #endif
2430 subpage_writelen(opaque, addr, value, 2);
2433 static CPUReadMemoryFunc *subpage_read[] = {
2434 &subpage_readb,
2435 &subpage_readw,
2436 &subpage_readl,
2439 static CPUWriteMemoryFunc *subpage_write[] = {
2440 &subpage_writeb,
2441 &subpage_writew,
2442 &subpage_writel,
2445 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2446 ram_addr_t memory)
2448 int idx, eidx;
2449 unsigned int i;
2451 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2452 return -1;
2453 idx = SUBPAGE_IDX(start);
2454 eidx = SUBPAGE_IDX(end);
2455 #if defined(DEBUG_SUBPAGE)
2456 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2457 mmio, start, end, idx, eidx, memory);
2458 #endif
2459 memory >>= IO_MEM_SHIFT;
2460 for (; idx <= eidx; idx++) {
2461 for (i = 0; i < 4; i++) {
2462 if (io_mem_read[memory][i]) {
2463 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2464 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2466 if (io_mem_write[memory][i]) {
2467 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2468 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2473 return 0;
2476 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2477 ram_addr_t orig_memory)
2479 subpage_t *mmio;
2480 int subpage_memory;
2482 mmio = qemu_mallocz(sizeof(subpage_t));
2483 if (mmio != NULL) {
2484 mmio->base = base;
2485 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2486 #if defined(DEBUG_SUBPAGE)
2487 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2488 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2489 #endif
2490 *phys = subpage_memory | IO_MEM_SUBPAGE;
2491 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2494 return mmio;
2497 static void io_mem_init(void)
2499 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2500 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2501 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2502 io_mem_nb = 5;
2504 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2505 watch_mem_write, NULL);
2506 /* alloc dirty bits array */
2507 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2508 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2511 /* mem_read and mem_write are arrays of functions containing the
2512 function to access byte (index 0), word (index 1) and dword (index
2513 2). Functions can be omitted with a NULL function pointer. The
2514 registered functions may be modified dynamically later.
2515 If io_index is positive, the corresponding io zone is
2516 modified. If it is zero or negative, a new io zone is allocated. The
2517 return value can be used with cpu_register_physical_memory(). -1 is
2518 returned on error. */
2519 int cpu_register_io_memory(int io_index,
2520 CPUReadMemoryFunc **mem_read,
2521 CPUWriteMemoryFunc **mem_write,
2522 void *opaque)
2524 int i, subwidth = 0;
2526 if (io_index <= 0) {
2527 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2528 return -1;
2529 io_index = io_mem_nb++;
2530 } else {
2531 if (io_index >= IO_MEM_NB_ENTRIES)
2532 return -1;
2535 for(i = 0;i < 3; i++) {
2536 if (!mem_read[i] || !mem_write[i])
2537 subwidth = IO_MEM_SUBWIDTH;
2538 io_mem_read[io_index][i] = mem_read[i];
2539 io_mem_write[io_index][i] = mem_write[i];
2541 io_mem_opaque[io_index] = opaque;
2542 return (io_index << IO_MEM_SHIFT) | subwidth;
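/* Illustrative sketch of registering a byte-only device (the handler bodies,
   table names and the 0x10000000 base address are invented): leaving the
   word and long entries NULL makes cpu_register_io_memory() return a value
   with IO_MEM_SUBWIDTH set, so cpu_register_physical_memory() routes the
   page through the subpage machinery above. */
static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0xff;                      /* hypothetical register value */
}

static void example_dev_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* a real device model would latch 'val' here */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb, NULL, NULL,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb, NULL, NULL,
};

static void example_register_dev(void)
{
    int io_index;

    io_index = cpu_register_io_memory(0, example_dev_read,
                                      example_dev_write, NULL);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io_index);
}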
2545 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2547 return io_mem_write[io_index >> IO_MEM_SHIFT];
2550 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2552 return io_mem_read[io_index >> IO_MEM_SHIFT];
2555 #endif /* !defined(CONFIG_USER_ONLY) */
2557 /* physical memory access (slow version, mainly for debug) */
2558 #if defined(CONFIG_USER_ONLY)
2559 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2560 int len, int is_write)
2562 int l, flags;
2563 target_ulong page;
2564 void * p;
2566 while (len > 0) {
2567 page = addr & TARGET_PAGE_MASK;
2568 l = (page + TARGET_PAGE_SIZE) - addr;
2569 if (l > len)
2570 l = len;
2571 flags = page_get_flags(page);
2572 if (!(flags & PAGE_VALID))
2573 return;
2574 if (is_write) {
2575 if (!(flags & PAGE_WRITE))
2576 return;
2577 /* XXX: this code should not depend on lock_user */
2578 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2579 /* FIXME - should this return an error rather than just fail? */
2580 return;
2581 memcpy(p, buf, l);
2582 unlock_user(p, addr, l);
2583 } else {
2584 if (!(flags & PAGE_READ))
2585 return;
2586 /* XXX: this code should not depend on lock_user */
2587 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2588 /* FIXME - should this return an error rather than just fail? */
2589 return;
2590 memcpy(buf, p, l);
2591 unlock_user(p, addr, 0);
2593 len -= l;
2594 buf += l;
2595 addr += l;
2599 #else
2600 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2601 int len, int is_write)
2603 int l, io_index;
2604 uint8_t *ptr;
2605 uint32_t val;
2606 target_phys_addr_t page;
2607 unsigned long pd;
2608 PhysPageDesc *p;
2610 while (len > 0) {
2611 page = addr & TARGET_PAGE_MASK;
2612 l = (page + TARGET_PAGE_SIZE) - addr;
2613 if (l > len)
2614 l = len;
2615 p = phys_page_find(page >> TARGET_PAGE_BITS);
2616 if (!p) {
2617 pd = IO_MEM_UNASSIGNED;
2618 } else {
2619 pd = p->phys_offset;
2622 if (is_write) {
2623 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2624 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2625 /* XXX: could force cpu_single_env to NULL to avoid
2626 potential bugs */
2627 if (l >= 4 && ((addr & 3) == 0)) {
2628 /* 32 bit write access */
2629 val = ldl_p(buf);
2630 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2631 l = 4;
2632 } else if (l >= 2 && ((addr & 1) == 0)) {
2633 /* 16 bit write access */
2634 val = lduw_p(buf);
2635 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2636 l = 2;
2637 } else {
2638 /* 8 bit write access */
2639 val = ldub_p(buf);
2640 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2641 l = 1;
2643 } else {
2644 unsigned long addr1;
2645 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2646 /* RAM case */
2647 ptr = phys_ram_base + addr1;
2648 memcpy(ptr, buf, l);
2649 if (!cpu_physical_memory_is_dirty(addr1)) {
2650 /* invalidate code */
2651 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2652 /* set dirty bit */
2653 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2654 (0xff & ~CODE_DIRTY_FLAG);
2657 } else {
2658 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2659 !(pd & IO_MEM_ROMD)) {
2660 /* I/O case */
2661 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2662 if (l >= 4 && ((addr & 3) == 0)) {
2663 /* 32 bit read access */
2664 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2665 stl_p(buf, val);
2666 l = 4;
2667 } else if (l >= 2 && ((addr & 1) == 0)) {
2668 /* 16 bit read access */
2669 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2670 stw_p(buf, val);
2671 l = 2;
2672 } else {
2673 /* 8 bit read access */
2674 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2675 stb_p(buf, val);
2676 l = 1;
2678 } else {
2679 /* RAM case */
2680 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2681 (addr & ~TARGET_PAGE_MASK);
2682 memcpy(buf, ptr, l);
2685 len -= l;
2686 buf += l;
2687 addr += l;
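/* Minimal sketch of a caller (the buffer layout and 'desc_addr' are
   hypothetical): a DMA-capable device model uses the
   cpu_physical_memory_read()/write() wrappers, which end up in the function
   above, to copy data to and from guest physical memory. */
static void example_dma_copy(target_phys_addr_t desc_addr)
{
    uint8_t data[64];
    uint8_t status = 1;

    cpu_physical_memory_read(desc_addr, data, sizeof(data));
    /* ... a real device would process 'data' here ... */
    cpu_physical_memory_write(desc_addr + sizeof(data), &status, 1);
}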
2691 /* used for ROM loading: can write in RAM and ROM */
2692 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2693 const uint8_t *buf, int len)
2695 int l;
2696 uint8_t *ptr;
2697 target_phys_addr_t page;
2698 unsigned long pd;
2699 PhysPageDesc *p;
2701 while (len > 0) {
2702 page = addr & TARGET_PAGE_MASK;
2703 l = (page + TARGET_PAGE_SIZE) - addr;
2704 if (l > len)
2705 l = len;
2706 p = phys_page_find(page >> TARGET_PAGE_BITS);
2707 if (!p) {
2708 pd = IO_MEM_UNASSIGNED;
2709 } else {
2710 pd = p->phys_offset;
2713 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2714 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2715 !(pd & IO_MEM_ROMD)) {
2716 /* do nothing */
2717 } else {
2718 unsigned long addr1;
2719 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2720 /* ROM/RAM case */
2721 ptr = phys_ram_base + addr1;
2722 memcpy(ptr, buf, l);
2724 len -= l;
2725 buf += l;
2726 addr += l;
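/* Minimal usage sketch (bios_base, bios_buf and bios_size are hypothetical):
   a board's firmware loader copies the image with
   cpu_physical_memory_write_rom() because, unlike cpu_physical_memory_rw(),
   it also stores into pages registered as ROM. */
static void example_load_firmware(const uint8_t *bios_buf, int bios_size,
                                  target_phys_addr_t bios_base)
{
    cpu_physical_memory_write_rom(bios_base, bios_buf, bios_size);
}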
2731 /* warning: addr must be aligned */
2732 uint32_t ldl_phys(target_phys_addr_t addr)
2734 int io_index;
2735 uint8_t *ptr;
2736 uint32_t val;
2737 unsigned long pd;
2738 PhysPageDesc *p;
2740 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2741 if (!p) {
2742 pd = IO_MEM_UNASSIGNED;
2743 } else {
2744 pd = p->phys_offset;
2747 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2748 !(pd & IO_MEM_ROMD)) {
2749 /* I/O case */
2750 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2751 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2752 } else {
2753 /* RAM case */
2754 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2755 (addr & ~TARGET_PAGE_MASK);
2756 val = ldl_p(ptr);
2758 return val;
2761 /* warning: addr must be aligned */
2762 uint64_t ldq_phys(target_phys_addr_t addr)
2764 int io_index;
2765 uint8_t *ptr;
2766 uint64_t val;
2767 unsigned long pd;
2768 PhysPageDesc *p;
2770 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2771 if (!p) {
2772 pd = IO_MEM_UNASSIGNED;
2773 } else {
2774 pd = p->phys_offset;
2777 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2778 !(pd & IO_MEM_ROMD)) {
2779 /* I/O case */
2780 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2781 #ifdef TARGET_WORDS_BIGENDIAN
2782 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2783 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2784 #else
2785 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2786 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2787 #endif
2788 } else {
2789 /* RAM case */
2790 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2791 (addr & ~TARGET_PAGE_MASK);
2792 val = ldq_p(ptr);
2794 return val;
2797 /* XXX: optimize */
2798 uint32_t ldub_phys(target_phys_addr_t addr)
2800 uint8_t val;
2801 cpu_physical_memory_read(addr, &val, 1);
2802 return val;
2805 /* XXX: optimize */
2806 uint32_t lduw_phys(target_phys_addr_t addr)
2808 uint16_t val;
2809 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2810 return tswap16(val);
2813 /* warning: addr must be aligned. The ram page is not marked as dirty
2814 and the code inside is not invalidated. It is useful if the dirty
2815 bits are used to track modified PTEs */
2816 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2818 int io_index;
2819 uint8_t *ptr;
2820 unsigned long pd;
2821 PhysPageDesc *p;
2823 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2824 if (!p) {
2825 pd = IO_MEM_UNASSIGNED;
2826 } else {
2827 pd = p->phys_offset;
2830 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2831 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2832 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2833 } else {
2834 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2835 (addr & ~TARGET_PAGE_MASK);
2836 stl_p(ptr, val);
2840 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2842 int io_index;
2843 uint8_t *ptr;
2844 unsigned long pd;
2845 PhysPageDesc *p;
2847 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2848 if (!p) {
2849 pd = IO_MEM_UNASSIGNED;
2850 } else {
2851 pd = p->phys_offset;
2854 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2855 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2856 #ifdef TARGET_WORDS_BIGENDIAN
2857 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2858 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2859 #else
2860 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2861 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2862 #endif
2863 } else {
2864 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2865 (addr & ~TARGET_PAGE_MASK);
2866 stq_p(ptr, val);
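/* Illustrative sketch (EXAMPLE_PTE_ACCESSED and the helper are invented):
   a target MMU fault handler reads a guest page-table entry with ldl_phys()
   and updates it with stl_phys_notdirty(), so the accessed-bit update
   neither marks the RAM page dirty nor invalidates translated code on it. */
#define EXAMPLE_PTE_ACCESSED 0x20
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & EXAMPLE_PTE_ACCESSED))
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}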
2870 /* warning: addr must be aligned */
2871 void stl_phys(target_phys_addr_t addr, uint32_t val)
2873 int io_index;
2874 uint8_t *ptr;
2875 unsigned long pd;
2876 PhysPageDesc *p;
2878 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2879 if (!p) {
2880 pd = IO_MEM_UNASSIGNED;
2881 } else {
2882 pd = p->phys_offset;
2885 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2886 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2887 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2888 } else {
2889 unsigned long addr1;
2890 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2891 /* RAM case */
2892 ptr = phys_ram_base + addr1;
2893 stl_p(ptr, val);
2894 if (!cpu_physical_memory_is_dirty(addr1)) {
2895 /* invalidate code */
2896 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2897 /* set dirty bit */
2898 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2899 (0xff & ~CODE_DIRTY_FLAG);
2904 /* XXX: optimize */
2905 void stb_phys(target_phys_addr_t addr, uint32_t val)
2907 uint8_t v = val;
2908 cpu_physical_memory_write(addr, &v, 1);
2911 /* XXX: optimize */
2912 void stw_phys(target_phys_addr_t addr, uint32_t val)
2914 uint16_t v = tswap16(val);
2915 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2918 /* XXX: optimize */
2919 void stq_phys(target_phys_addr_t addr, uint64_t val)
2921 val = tswap64(val);
2922 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2925 #endif
2927 /* virtual memory access for debug */
2928 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2929 uint8_t *buf, int len, int is_write)
2931 int l;
2932 target_phys_addr_t phys_addr;
2933 target_ulong page;
2935 while (len > 0) {
2936 page = addr & TARGET_PAGE_MASK;
2937 phys_addr = cpu_get_phys_page_debug(env, page);
2938 /* if no physical page mapped, return an error */
2939 if (phys_addr == -1)
2940 return -1;
2941 l = (page + TARGET_PAGE_SIZE) - addr;
2942 if (l > len)
2943 l = len;
2944 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2945 buf, l, is_write);
2946 len -= l;
2947 buf += l;
2948 addr += l;
2950 return 0;
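/* Minimal sketch of a debugger-style caller (the helper is invented):
   callers such as the gdb stub read guest virtual memory through
   cpu_memory_rw_debug(), which translates each page with
   cpu_get_phys_page_debug() and then uses cpu_physical_memory_rw(). */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *valp)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;
    *valp = ldl_p(buf);
    return 0;
}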
2953 void dump_exec_info(FILE *f,
2954 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2956 int i, target_code_size, max_target_code_size;
2957 int direct_jmp_count, direct_jmp2_count, cross_page;
2958 TranslationBlock *tb;
2960 target_code_size = 0;
2961 max_target_code_size = 0;
2962 cross_page = 0;
2963 direct_jmp_count = 0;
2964 direct_jmp2_count = 0;
2965 for(i = 0; i < nb_tbs; i++) {
2966 tb = &tbs[i];
2967 target_code_size += tb->size;
2968 if (tb->size > max_target_code_size)
2969 max_target_code_size = tb->size;
2970 if (tb->page_addr[1] != -1)
2971 cross_page++;
2972 if (tb->tb_next_offset[0] != 0xffff) {
2973 direct_jmp_count++;
2974 if (tb->tb_next_offset[1] != 0xffff) {
2975 direct_jmp2_count++;
2979 /* XXX: avoid using doubles ? */
2980 cpu_fprintf(f, "Translation buffer state:\n");
2981 cpu_fprintf(f, "gen code size %ld/%ld\n",
2982 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
2983 cpu_fprintf(f, "TB count %d/%d\n",
2984 nb_tbs, code_gen_max_blocks);
2985 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2986 nb_tbs ? target_code_size / nb_tbs : 0,
2987 max_target_code_size);
2988 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2989 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2990 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2991 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2992 cross_page,
2993 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2994 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2995 direct_jmp_count,
2996 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2997 direct_jmp2_count,
2998 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2999 cpu_fprintf(f, "\nStatistics:\n");
3000 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3001 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3002 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3003 tcg_dump_info(f, cpu_fprintf);
3006 #if !defined(CONFIG_USER_ONLY)
3008 #define MMUSUFFIX _cmmu
3009 #define GETPC() NULL
3010 #define env cpu_single_env
3011 #define SOFTMMU_CODE_ACCESS
3013 #define SHIFT 0
3014 #include "softmmu_template.h"
3016 #define SHIFT 1
3017 #include "softmmu_template.h"
3019 #define SHIFT 2
3020 #include "softmmu_template.h"
3022 #define SHIFT 3
3023 #include "softmmu_template.h"
3025 #undef env
3027 #endif