[qemu/mini2440.git] / exec.c
blob 64287e20c75aff7720705fed0ab9ac813891bd84
1 /*
2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
98 #if !defined(CONFIG_USER_ONLY)
99 ram_addr_t phys_ram_size;
100 int phys_ram_fd;
101 uint8_t *phys_ram_base;
102 uint8_t *phys_ram_dirty;
103 static ram_addr_t phys_ram_alloc_offset = 0;
104 #endif
106 CPUState *first_cpu;
107 /* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
109 CPUState *cpu_single_env;
111 typedef struct PageDesc {
112 /* list of TBs intersecting this ram page */
113 TranslationBlock *first_tb;
114     /* in order to optimize self modifying code, we count the number
115        of write accesses to a given page; past a threshold, a bitmap is used */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118 #if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120 #endif
121 } PageDesc;
123 typedef struct PhysPageDesc {
124 /* offset in host memory of the page + io_index in the low bits */
125 ram_addr_t phys_offset;
126 } PhysPageDesc;
128 #define L2_BITS 10
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131  * In the future, this is to be replaced by a multi-level table
132  * to actually be able to handle the complete 64 bit address space. */
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135 #else
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
137 #endif
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
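/* l1_map is a two-level table indexed by target page number: the top L1_BITS
   of the page number select an L1 slot, the low L2_BITS select a PageDesc
   within the L2 block.  Example: with 4 KB target pages (TARGET_PAGE_BITS == 12)
   and L2_BITS == 10, a 32-bit address splits 10/10/12 into L1 index, L2 index
   and page offset. */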
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
151 #if !defined(CONFIG_USER_ONLY)
152 static void io_mem_init(void);
154 /* io memory support */
155 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
156 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
157 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
158 static int io_mem_nb;
159 static int io_mem_watch;
160 #endif
162 /* log support */
163 char *logfilename = "/tmp/qemu.log";
164 FILE *logfile;
165 int loglevel;
166 static int log_append = 0;
168 /* statistics */
169 static int tlb_flush_count;
170 static int tb_flush_count;
171 static int tb_phys_invalidate_count;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179 } subpage_t;
181 #ifdef _WIN32
182 static void map_exec(void *addr, long size)
184 DWORD old_protect;
185 VirtualProtect(addr, size,
186 PAGE_EXECUTE_READWRITE, &old_protect);
189 #else
190 static void map_exec(void *addr, long size)
192 unsigned long start, end, page_size;
194 page_size = getpagesize();
195 start = (unsigned long)addr;
196 start &= ~(page_size - 1);
198 end = (unsigned long)addr + size;
199 end += page_size - 1;
200 end &= ~(page_size - 1);
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
205 #endif
207 static void page_init(void)
209 /* NOTE: we can always suppose that qemu_host_page_size >=
210 TARGET_PAGE_SIZE */
211 #ifdef _WIN32
213 SYSTEM_INFO system_info;
214 DWORD old_protect;
216 GetSystemInfo(&system_info);
217 qemu_real_host_page_size = system_info.dwPageSize;
219 #else
220 qemu_real_host_page_size = getpagesize();
221 #endif
222 if (qemu_host_page_size == 0)
223 qemu_host_page_size = qemu_real_host_page_size;
224 if (qemu_host_page_size < TARGET_PAGE_SIZE)
225 qemu_host_page_size = TARGET_PAGE_SIZE;
226 qemu_host_page_bits = 0;
227 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
228 qemu_host_page_bits++;
229 qemu_host_page_mask = ~(qemu_host_page_size - 1);
230 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
231 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
233 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
235 long long startaddr, endaddr;
236 FILE *f;
237 int n;
239 mmap_lock();
240 last_brk = (unsigned long)sbrk(0);
241 f = fopen("/proc/self/maps", "r");
242 if (f) {
243 do {
244 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
245 if (n == 2) {
246 startaddr = MIN(startaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 endaddr = MIN(endaddr,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
250 page_set_flags(startaddr & TARGET_PAGE_MASK,
251 TARGET_PAGE_ALIGN(endaddr),
252 PAGE_RESERVED);
254 } while (!feof(f));
255 fclose(f);
257 mmap_unlock();
259 #endif
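/* In user-mode emulation the loop above marks every existing host mapping
   listed in /proc/self/maps as PAGE_RESERVED, so later guest allocations do
   not collide with memory the host process is already using. */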
262 static inline PageDesc *page_find_alloc(target_ulong index)
264 PageDesc **lp, *p;
266 lp = &l1_map[index >> L2_BITS];
267 p = *lp;
268 if (!p) {
269 /* allocate if not found */
270 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
271 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
272 *lp = p;
274 return p + (index & (L2_SIZE - 1));
277 static inline PageDesc *page_find(target_ulong index)
279 PageDesc *p;
281 p = l1_map[index >> L2_BITS];
282 if (!p)
283 return 0;
284 return p + (index & (L2_SIZE - 1));
287 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
289 void **lp, **p;
290 PhysPageDesc *pd;
292 p = (void **)l1_phys_map;
293 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
295 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
296 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
297 #endif
298 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
299 p = *lp;
300 if (!p) {
301 /* allocate if not found */
302 if (!alloc)
303 return NULL;
304 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
305 memset(p, 0, sizeof(void *) * L1_SIZE);
306 *lp = p;
308 #endif
309 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
310 pd = *lp;
311 if (!pd) {
312 int i;
313 /* allocate if not found */
314 if (!alloc)
315 return NULL;
316 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
317 *lp = pd;
318 for (i = 0; i < L2_SIZE; i++)
319 pd[i].phys_offset = IO_MEM_UNASSIGNED;
321 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
324 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
326 return phys_page_find_alloc(index, 0);
329 #if !defined(CONFIG_USER_ONLY)
330 static void tlb_protect_code(ram_addr_t ram_addr);
331 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
332 target_ulong vaddr);
333 #define mmap_lock() do { } while(0)
334 #define mmap_unlock() do { } while(0)
335 #endif
337 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
339 #if defined(CONFIG_USER_ONLY)
340 /* Currently it is not recommended to allocate big chunks of data in
341    user mode. It will change when a dedicated libc is used */
342 #define USE_STATIC_CODE_GEN_BUFFER
343 #endif
345 #ifdef USE_STATIC_CODE_GEN_BUFFER
346 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
347 #endif
349 void code_gen_alloc(unsigned long tb_size)
351 #ifdef USE_STATIC_CODE_GEN_BUFFER
352 code_gen_buffer = static_code_gen_buffer;
353 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
354 map_exec(code_gen_buffer, code_gen_buffer_size);
355 #else
356 code_gen_buffer_size = tb_size;
357 if (code_gen_buffer_size == 0) {
358 #if defined(CONFIG_USER_ONLY)
359 /* in user mode, phys_ram_size is not meaningful */
360 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
361 #else
362     /* XXX: needs adjustments */
363 code_gen_buffer_size = (int)(phys_ram_size / 4);
364 #endif
366 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
367 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
368 /* The code gen buffer location may have constraints depending on
369 the host cpu and OS */
370 #if defined(__linux__)
372 int flags;
373 flags = MAP_PRIVATE | MAP_ANONYMOUS;
374 #if defined(__x86_64__)
375 flags |= MAP_32BIT;
376 /* Cannot map more than that */
377 if (code_gen_buffer_size > (800 * 1024 * 1024))
378 code_gen_buffer_size = (800 * 1024 * 1024);
379 #endif
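    /* MAP_32BIT keeps the buffer in the low 2 GB of the host address space so
       that generated code can reach the prologue and helper functions with
       32-bit relative jumps and calls. */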
380 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
381 PROT_WRITE | PROT_READ | PROT_EXEC,
382 flags, -1, 0);
383 if (code_gen_buffer == MAP_FAILED) {
384 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
385 exit(1);
388 #else
389 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
390 if (!code_gen_buffer) {
391 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
392 exit(1);
394 map_exec(code_gen_buffer, code_gen_buffer_size);
395 #endif
396 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
397 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
398 code_gen_buffer_max_size = code_gen_buffer_size -
399 code_gen_max_block_size();
400 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
401 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
404 /* Must be called before using the QEMU cpus. 'tb_size' is the size
405 (in bytes) allocated to the translation buffer. Zero means default
406 size. */
407 void cpu_exec_init_all(unsigned long tb_size)
409 cpu_gen_init();
410 code_gen_alloc(tb_size);
411 code_gen_ptr = code_gen_buffer;
412 page_init();
413 #if !defined(CONFIG_USER_ONLY)
414 io_mem_init();
415 #endif
418 void cpu_exec_init(CPUState *env)
420 CPUState **penv;
421 int cpu_index;
423 env->next_cpu = NULL;
424 penv = &first_cpu;
425 cpu_index = 0;
426 while (*penv != NULL) {
427 penv = (CPUState **)&(*penv)->next_cpu;
428 cpu_index++;
430 env->cpu_index = cpu_index;
431 env->nb_watchpoints = 0;
432 *penv = env;
435 static inline void invalidate_page_bitmap(PageDesc *p)
437 if (p->code_bitmap) {
438 qemu_free(p->code_bitmap);
439 p->code_bitmap = NULL;
441 p->code_write_count = 0;
444 /* set to NULL all the 'first_tb' fields in all PageDescs */
445 static void page_flush_tb(void)
447 int i, j;
448 PageDesc *p;
450 for(i = 0; i < L1_SIZE; i++) {
451 p = l1_map[i];
452 if (p) {
453 for(j = 0; j < L2_SIZE; j++) {
454 p->first_tb = NULL;
455 invalidate_page_bitmap(p);
456 p++;
462 /* flush all the translation blocks */
463 /* XXX: tb_flush is currently not thread safe */
464 void tb_flush(CPUState *env1)
466 CPUState *env;
467 #if defined(DEBUG_FLUSH)
468 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
469 (unsigned long)(code_gen_ptr - code_gen_buffer),
470 nb_tbs, nb_tbs > 0 ?
471 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
472 #endif
473 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
474 cpu_abort(env1, "Internal error: code buffer overflow\n");
476 nb_tbs = 0;
478 for(env = first_cpu; env != NULL; env = env->next_cpu) {
479 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
482 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
483 page_flush_tb();
485 code_gen_ptr = code_gen_buffer;
486 /* XXX: flush processor icache at this point if cache flush is
487 expensive */
488 tb_flush_count++;
491 #ifdef DEBUG_TB_CHECK
493 static void tb_invalidate_check(target_ulong address)
495 TranslationBlock *tb;
496 int i;
497 address &= TARGET_PAGE_MASK;
498 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
499 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
500 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
501 address >= tb->pc + tb->size)) {
502 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
503 address, (long)tb->pc, tb->size);
509 /* verify that all the pages have correct rights for code */
510 static void tb_page_check(void)
512 TranslationBlock *tb;
513 int i, flags1, flags2;
515 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
516 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
517 flags1 = page_get_flags(tb->pc);
518 flags2 = page_get_flags(tb->pc + tb->size - 1);
519 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
520 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
521 (long)tb->pc, tb->size, flags1, flags2);
527 void tb_jmp_check(TranslationBlock *tb)
529 TranslationBlock *tb1;
530 unsigned int n1;
532 /* suppress any remaining jumps to this TB */
533 tb1 = tb->jmp_first;
534 for(;;) {
535 n1 = (long)tb1 & 3;
536 tb1 = (TranslationBlock *)((long)tb1 & ~3);
537 if (n1 == 2)
538 break;
539 tb1 = tb1->jmp_next[n1];
541 /* check end of list */
542 if (tb1 != tb) {
543 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
547 #endif
549 /* invalidate one TB */
550 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
551 int next_offset)
553 TranslationBlock *tb1;
554 for(;;) {
555 tb1 = *ptb;
556 if (tb1 == tb) {
557 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
558 break;
560 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
564 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
566 TranslationBlock *tb1;
567 unsigned int n1;
569 for(;;) {
570 tb1 = *ptb;
571 n1 = (long)tb1 & 3;
572 tb1 = (TranslationBlock *)((long)tb1 & ~3);
573 if (tb1 == tb) {
574 *ptb = tb1->page_next[n1];
575 break;
577 ptb = &tb1->page_next[n1];
581 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
583 TranslationBlock *tb1, **ptb;
584 unsigned int n1;
586 ptb = &tb->jmp_next[n];
587 tb1 = *ptb;
588 if (tb1) {
589 /* find tb(n) in circular list */
590 for(;;) {
591 tb1 = *ptb;
592 n1 = (long)tb1 & 3;
593 tb1 = (TranslationBlock *)((long)tb1 & ~3);
594 if (n1 == n && tb1 == tb)
595 break;
596 if (n1 == 2) {
597 ptb = &tb1->jmp_first;
598 } else {
599 ptb = &tb1->jmp_next[n1];
602 /* now we can suppress tb(n) from the list */
603 *ptb = tb->jmp_next[n];
605 tb->jmp_next[n] = NULL;
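/* Pointers on the TB jump lists carry a tag in their low two bits: 0 or 1
   names the jmp_next slot of the pointed-to TB that continues the list, and
   2 marks the owning TB itself, i.e. the end of the circular list. */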
609 /* reset the jump entry 'n' of a TB so that it is not chained to
610 another TB */
611 static inline void tb_reset_jump(TranslationBlock *tb, int n)
613 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
616 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
618 CPUState *env;
619 PageDesc *p;
620 unsigned int h, n1;
621 target_phys_addr_t phys_pc;
622 TranslationBlock *tb1, *tb2;
624 /* remove the TB from the hash list */
625 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
626 h = tb_phys_hash_func(phys_pc);
627 tb_remove(&tb_phys_hash[h], tb,
628 offsetof(TranslationBlock, phys_hash_next));
630 /* remove the TB from the page list */
631 if (tb->page_addr[0] != page_addr) {
632 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
633 tb_page_remove(&p->first_tb, tb);
634 invalidate_page_bitmap(p);
636 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
637 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
638 tb_page_remove(&p->first_tb, tb);
639 invalidate_page_bitmap(p);
642 tb_invalidated_flag = 1;
644 /* remove the TB from the hash list */
645 h = tb_jmp_cache_hash_func(tb->pc);
646 for(env = first_cpu; env != NULL; env = env->next_cpu) {
647 if (env->tb_jmp_cache[h] == tb)
648 env->tb_jmp_cache[h] = NULL;
651 /* suppress this TB from the two jump lists */
652 tb_jmp_remove(tb, 0);
653 tb_jmp_remove(tb, 1);
655 /* suppress any remaining jumps to this TB */
656 tb1 = tb->jmp_first;
657 for(;;) {
658 n1 = (long)tb1 & 3;
659 if (n1 == 2)
660 break;
661 tb1 = (TranslationBlock *)((long)tb1 & ~3);
662 tb2 = tb1->jmp_next[n1];
663 tb_reset_jump(tb1, n1);
664 tb1->jmp_next[n1] = NULL;
665 tb1 = tb2;
667 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
669 tb_phys_invalidate_count++;
672 static inline void set_bits(uint8_t *tab, int start, int len)
674 int end, mask, end1;
676 end = start + len;
677 tab += start >> 3;
678 mask = 0xff << (start & 7);
679 if ((start & ~7) == (end & ~7)) {
680 if (start < end) {
681 mask &= ~(0xff << (end & 7));
682 *tab |= mask;
684 } else {
685 *tab++ |= mask;
686 start = (start + 8) & ~7;
687 end1 = end & ~7;
688 while (start < end1) {
689 *tab++ = 0xff;
690 start += 8;
692 if (start < end) {
693 mask = ~(0xff << (end & 7));
694 *tab |= mask;
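/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03. */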
699 static void build_page_bitmap(PageDesc *p)
701 int n, tb_start, tb_end;
702 TranslationBlock *tb;
704 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
705 if (!p->code_bitmap)
706 return;
707 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
709 tb = p->first_tb;
710 while (tb != NULL) {
711 n = (long)tb & 3;
712 tb = (TranslationBlock *)((long)tb & ~3);
713 /* NOTE: this is subtle as a TB may span two physical pages */
714 if (n == 0) {
715 /* NOTE: tb_end may be after the end of the page, but
716 it is not a problem */
717 tb_start = tb->pc & ~TARGET_PAGE_MASK;
718 tb_end = tb_start + tb->size;
719 if (tb_end > TARGET_PAGE_SIZE)
720 tb_end = TARGET_PAGE_SIZE;
721 } else {
722 tb_start = 0;
723 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
725 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
726 tb = tb->page_next[n];
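/* The resulting bitmap has one bit per byte of the page; a set bit means the
   byte is covered by translated code, so tb_invalidate_phys_page_fast() can
   ignore writes that only touch untranslated bytes. */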
730 #ifdef TARGET_HAS_PRECISE_SMC
732 static void tb_gen_code(CPUState *env,
733 target_ulong pc, target_ulong cs_base, int flags,
734 int cflags)
736 TranslationBlock *tb;
737 uint8_t *tc_ptr;
738 target_ulong phys_pc, phys_page2, virt_page2;
739 int code_gen_size;
741 phys_pc = get_phys_addr_code(env, pc);
742 tb = tb_alloc(pc);
743 if (!tb) {
744 /* flush must be done */
745 tb_flush(env);
746 /* cannot fail at this point */
747 tb = tb_alloc(pc);
749 tc_ptr = code_gen_ptr;
750 tb->tc_ptr = tc_ptr;
751 tb->cs_base = cs_base;
752 tb->flags = flags;
753 tb->cflags = cflags;
754 cpu_gen_code(env, tb, &code_gen_size);
755 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
757 /* check next page if needed */
758 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
759 phys_page2 = -1;
760 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
761 phys_page2 = get_phys_addr_code(env, virt_page2);
763 tb_link_phys(tb, phys_pc, phys_page2);
765 #endif
767 /* invalidate all TBs which intersect with the target physical page
768 starting in range [start;end[. NOTE: start and end must refer to
769 the same physical page. 'is_cpu_write_access' should be true if called
770 from a real cpu write access: the virtual CPU will exit the current
771 TB if code is modified inside this TB. */
772 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
773 int is_cpu_write_access)
775 int n, current_tb_modified, current_tb_not_found, current_flags;
776 CPUState *env = cpu_single_env;
777 PageDesc *p;
778 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
779 target_ulong tb_start, tb_end;
780 target_ulong current_pc, current_cs_base;
782 p = page_find(start >> TARGET_PAGE_BITS);
783 if (!p)
784 return;
785 if (!p->code_bitmap &&
786 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
787 is_cpu_write_access) {
788 /* build code bitmap */
789 build_page_bitmap(p);
792 /* we remove all the TBs in the range [start, end[ */
793 /* XXX: see if in some cases it could be faster to invalidate all the code */
794 current_tb_not_found = is_cpu_write_access;
795 current_tb_modified = 0;
796 current_tb = NULL; /* avoid warning */
797 current_pc = 0; /* avoid warning */
798 current_cs_base = 0; /* avoid warning */
799 current_flags = 0; /* avoid warning */
800 tb = p->first_tb;
801 while (tb != NULL) {
802 n = (long)tb & 3;
803 tb = (TranslationBlock *)((long)tb & ~3);
804 tb_next = tb->page_next[n];
805 /* NOTE: this is subtle as a TB may span two physical pages */
806 if (n == 0) {
807 /* NOTE: tb_end may be after the end of the page, but
808 it is not a problem */
809 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
810 tb_end = tb_start + tb->size;
811 } else {
812 tb_start = tb->page_addr[1];
813 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
815 if (!(tb_end <= start || tb_start >= end)) {
816 #ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_not_found) {
818 current_tb_not_found = 0;
819 current_tb = NULL;
820 if (env->mem_write_pc) {
821 /* now we have a real cpu fault */
822 current_tb = tb_find_pc(env->mem_write_pc);
825 if (current_tb == tb &&
826 !(current_tb->cflags & CF_SINGLE_INSN)) {
827 /* If we are modifying the current TB, we must stop
828 its execution. We could be more precise by checking
829 that the modification is after the current PC, but it
830 would require a specialized function to partially
831 restore the CPU state */
833 current_tb_modified = 1;
834 cpu_restore_state(current_tb, env,
835 env->mem_write_pc, NULL);
836 #if defined(TARGET_I386)
837 current_flags = env->hflags;
838 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
839 current_cs_base = (target_ulong)env->segs[R_CS].base;
840 current_pc = current_cs_base + env->eip;
841 #else
842 #error unsupported CPU
843 #endif
845 #endif /* TARGET_HAS_PRECISE_SMC */
846 /* we need to do that to handle the case where a signal
847 occurs while doing tb_phys_invalidate() */
848 saved_tb = NULL;
849 if (env) {
850 saved_tb = env->current_tb;
851 env->current_tb = NULL;
853 tb_phys_invalidate(tb, -1);
854 if (env) {
855 env->current_tb = saved_tb;
856 if (env->interrupt_request && env->current_tb)
857 cpu_interrupt(env, env->interrupt_request);
860 tb = tb_next;
862 #if !defined(CONFIG_USER_ONLY)
863 /* if no code remaining, no need to continue to use slow writes */
864 if (!p->first_tb) {
865 invalidate_page_bitmap(p);
866 if (is_cpu_write_access) {
867 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
870 #endif
871 #ifdef TARGET_HAS_PRECISE_SMC
872 if (current_tb_modified) {
873 /* we generate a block containing just the instruction
874 modifying the memory. It will ensure that it cannot modify
875 itself */
876 env->current_tb = NULL;
877 tb_gen_code(env, current_pc, current_cs_base, current_flags,
878 CF_SINGLE_INSN);
879 cpu_resume_from_signal(env, NULL);
881 #endif
884 /* len must be <= 8 and start must be a multiple of len */
885 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
887 PageDesc *p;
888 int offset, b;
889 #if 0
890 if (1) {
891 if (loglevel) {
892 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
893 cpu_single_env->mem_write_vaddr, len,
894 cpu_single_env->eip,
895 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
898 #endif
899 p = page_find(start >> TARGET_PAGE_BITS);
900 if (!p)
901 return;
902 if (p->code_bitmap) {
903 offset = start & ~TARGET_PAGE_MASK;
904 b = p->code_bitmap[offset >> 3] >> (offset & 7);
905 if (b & ((1 << len) - 1))
906 goto do_invalidate;
907 } else {
908 do_invalidate:
909 tb_invalidate_phys_page_range(start, start + len, 1);
913 #if !defined(CONFIG_SOFTMMU)
914 static void tb_invalidate_phys_page(target_phys_addr_t addr,
915 unsigned long pc, void *puc)
917 int n, current_flags, current_tb_modified;
918 target_ulong current_pc, current_cs_base;
919 PageDesc *p;
920 TranslationBlock *tb, *current_tb;
921 #ifdef TARGET_HAS_PRECISE_SMC
922 CPUState *env = cpu_single_env;
923 #endif
925 addr &= TARGET_PAGE_MASK;
926 p = page_find(addr >> TARGET_PAGE_BITS);
927 if (!p)
928 return;
929 tb = p->first_tb;
930 current_tb_modified = 0;
931 current_tb = NULL;
932 current_pc = 0; /* avoid warning */
933 current_cs_base = 0; /* avoid warning */
934 current_flags = 0; /* avoid warning */
935 #ifdef TARGET_HAS_PRECISE_SMC
936 if (tb && pc != 0) {
937 current_tb = tb_find_pc(pc);
939 #endif
940 while (tb != NULL) {
941 n = (long)tb & 3;
942 tb = (TranslationBlock *)((long)tb & ~3);
943 #ifdef TARGET_HAS_PRECISE_SMC
944 if (current_tb == tb &&
945 !(current_tb->cflags & CF_SINGLE_INSN)) {
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
952 current_tb_modified = 1;
953 cpu_restore_state(current_tb, env, pc, puc);
954 #if defined(TARGET_I386)
955 current_flags = env->hflags;
956 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
957 current_cs_base = (target_ulong)env->segs[R_CS].base;
958 current_pc = current_cs_base + env->eip;
959 #else
960 #error unsupported CPU
961 #endif
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 tb_phys_invalidate(tb, addr);
965 tb = tb->page_next[n];
967 p->first_tb = NULL;
968 #ifdef TARGET_HAS_PRECISE_SMC
969 if (current_tb_modified) {
970 /* we generate a block containing just the instruction
971 modifying the memory. It will ensure that it cannot modify
972 itself */
973 env->current_tb = NULL;
974 tb_gen_code(env, current_pc, current_cs_base, current_flags,
975 CF_SINGLE_INSN);
976 cpu_resume_from_signal(env, puc);
978 #endif
980 #endif
982 /* add the tb in the target page and protect it if necessary */
983 static inline void tb_alloc_page(TranslationBlock *tb,
984 unsigned int n, target_ulong page_addr)
986 PageDesc *p;
987 TranslationBlock *last_first_tb;
989 tb->page_addr[n] = page_addr;
990 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
991 tb->page_next[n] = p->first_tb;
992 last_first_tb = p->first_tb;
993 p->first_tb = (TranslationBlock *)((long)tb | n);
994 invalidate_page_bitmap(p);
996 #if defined(TARGET_HAS_SMC) || 1
998 #if defined(CONFIG_USER_ONLY)
999 if (p->flags & PAGE_WRITE) {
1000 target_ulong addr;
1001 PageDesc *p2;
1002 int prot;
1004 /* force the host page as non writable (writes will have a
1005 page fault + mprotect overhead) */
1006 page_addr &= qemu_host_page_mask;
1007 prot = 0;
1008 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1009 addr += TARGET_PAGE_SIZE) {
1011 p2 = page_find (addr >> TARGET_PAGE_BITS);
1012 if (!p2)
1013 continue;
1014 prot |= p2->flags;
1015 p2->flags &= ~PAGE_WRITE;
1016 page_get_flags(addr);
1018 mprotect(g2h(page_addr), qemu_host_page_size,
1019 (prot & PAGE_BITS) & ~PAGE_WRITE);
1020 #ifdef DEBUG_TB_INVALIDATE
1021 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1022 page_addr);
1023 #endif
1025 #else
1026 /* if some code is already present, then the pages are already
1027 protected. So we handle the case where only the first TB is
1028 allocated in a physical page */
1029 if (!last_first_tb) {
1030 tlb_protect_code(page_addr);
1032 #endif
1034 #endif /* TARGET_HAS_SMC */
1037 /* Allocate a new translation block. Flush the translation buffer if
1038 too many translation blocks or too much generated code. */
1039 TranslationBlock *tb_alloc(target_ulong pc)
1041 TranslationBlock *tb;
1043 if (nb_tbs >= code_gen_max_blocks ||
1044 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1045 return NULL;
1046 tb = &tbs[nb_tbs++];
1047 tb->pc = pc;
1048 tb->cflags = 0;
1049 return tb;
1052 /* add a new TB and link it to the physical page tables. phys_page2 is
1053 (-1) to indicate that only one page contains the TB. */
1054 void tb_link_phys(TranslationBlock *tb,
1055 target_ulong phys_pc, target_ulong phys_page2)
1057 unsigned int h;
1058 TranslationBlock **ptb;
1060 /* Grab the mmap lock to stop another thread invalidating this TB
1061 before we are done. */
1062 mmap_lock();
1063 /* add in the physical hash table */
1064 h = tb_phys_hash_func(phys_pc);
1065 ptb = &tb_phys_hash[h];
1066 tb->phys_hash_next = *ptb;
1067 *ptb = tb;
1069 /* add in the page list */
1070 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1071 if (phys_page2 != -1)
1072 tb_alloc_page(tb, 1, phys_page2);
1073 else
1074 tb->page_addr[1] = -1;
1076 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1077 tb->jmp_next[0] = NULL;
1078 tb->jmp_next[1] = NULL;
1080 /* init original jump addresses */
1081 if (tb->tb_next_offset[0] != 0xffff)
1082 tb_reset_jump(tb, 0);
1083 if (tb->tb_next_offset[1] != 0xffff)
1084 tb_reset_jump(tb, 1);
1086 #ifdef DEBUG_TB_CHECK
1087 tb_page_check();
1088 #endif
1089 mmap_unlock();
1092 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1093 tb[1].tc_ptr. Return NULL if not found */
1094 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1096 int m_min, m_max, m;
1097 unsigned long v;
1098 TranslationBlock *tb;
1100 if (nb_tbs <= 0)
1101 return NULL;
1102 if (tc_ptr < (unsigned long)code_gen_buffer ||
1103 tc_ptr >= (unsigned long)code_gen_ptr)
1104 return NULL;
1105 /* binary search (cf Knuth) */
1106 m_min = 0;
1107 m_max = nb_tbs - 1;
1108 while (m_min <= m_max) {
1109 m = (m_min + m_max) >> 1;
1110 tb = &tbs[m];
1111 v = (unsigned long)tb->tc_ptr;
1112 if (v == tc_ptr)
1113 return tb;
1114 else if (tc_ptr < v) {
1115 m_max = m - 1;
1116 } else {
1117 m_min = m + 1;
1120 return &tbs[m_max];
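/* tbs[] is filled in the order blocks are generated and each block's code is
   laid out sequentially in code_gen_buffer, so the array is sorted by tc_ptr;
   the binary search therefore returns the block whose generated code
   contains tc_ptr. */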
1123 static void tb_reset_jump_recursive(TranslationBlock *tb);
1125 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1127 TranslationBlock *tb1, *tb_next, **ptb;
1128 unsigned int n1;
1130 tb1 = tb->jmp_next[n];
1131 if (tb1 != NULL) {
1132 /* find head of list */
1133 for(;;) {
1134 n1 = (long)tb1 & 3;
1135 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1136 if (n1 == 2)
1137 break;
1138 tb1 = tb1->jmp_next[n1];
1140         /* we are now sure that tb jumps to tb1 */
1141 tb_next = tb1;
1143 /* remove tb from the jmp_first list */
1144 ptb = &tb_next->jmp_first;
1145 for(;;) {
1146 tb1 = *ptb;
1147 n1 = (long)tb1 & 3;
1148 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1149 if (n1 == n && tb1 == tb)
1150 break;
1151 ptb = &tb1->jmp_next[n1];
1153 *ptb = tb->jmp_next[n];
1154 tb->jmp_next[n] = NULL;
1156 /* suppress the jump to next tb in generated code */
1157 tb_reset_jump(tb, n);
1159 /* suppress jumps in the tb on which we could have jumped */
1160 tb_reset_jump_recursive(tb_next);
1164 static void tb_reset_jump_recursive(TranslationBlock *tb)
1166 tb_reset_jump_recursive2(tb, 0);
1167 tb_reset_jump_recursive2(tb, 1);
1170 #if defined(TARGET_HAS_ICE)
1171 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1173 target_phys_addr_t addr;
1174 target_ulong pd;
1175 ram_addr_t ram_addr;
1176 PhysPageDesc *p;
1178 addr = cpu_get_phys_page_debug(env, pc);
1179 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1180 if (!p) {
1181 pd = IO_MEM_UNASSIGNED;
1182 } else {
1183 pd = p->phys_offset;
1185 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1186 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1188 #endif
1190 /* Add a watchpoint. */
1191 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1193 int i;
1195 for (i = 0; i < env->nb_watchpoints; i++) {
1196 if (addr == env->watchpoint[i].vaddr)
1197 return 0;
1199 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1200 return -1;
1202 i = env->nb_watchpoints++;
1203 env->watchpoint[i].vaddr = addr;
1204 env->watchpoint[i].type = type;
1205 tlb_flush_page(env, addr);
1206 /* FIXME: This flush is needed because of the hack to make memory ops
1207 terminate the TB. It can be removed once the proper IO trap and
1208 re-execute bits are in. */
1209 tb_flush(env);
1210 return i;
1213 /* Remove a watchpoint. */
1214 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1216 int i;
1218 for (i = 0; i < env->nb_watchpoints; i++) {
1219 if (addr == env->watchpoint[i].vaddr) {
1220 env->nb_watchpoints--;
1221 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1222 tlb_flush_page(env, addr);
1223 return 0;
1226 return -1;
1229 /* Remove all watchpoints. */
1230 void cpu_watchpoint_remove_all(CPUState *env) {
1231 int i;
1233 for (i = 0; i < env->nb_watchpoints; i++) {
1234 tlb_flush_page(env, env->watchpoint[i].vaddr);
1236 env->nb_watchpoints = 0;
1239 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1240 breakpoint is reached */
1241 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1243 #if defined(TARGET_HAS_ICE)
1244 int i;
1246 for(i = 0; i < env->nb_breakpoints; i++) {
1247 if (env->breakpoints[i] == pc)
1248 return 0;
1251 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1252 return -1;
1253 env->breakpoints[env->nb_breakpoints++] = pc;
1255 breakpoint_invalidate(env, pc);
1256 return 0;
1257 #else
1258 return -1;
1259 #endif
1262 /* remove all breakpoints */
1263 void cpu_breakpoint_remove_all(CPUState *env) {
1264 #if defined(TARGET_HAS_ICE)
1265 int i;
1266 for(i = 0; i < env->nb_breakpoints; i++) {
1267 breakpoint_invalidate(env, env->breakpoints[i]);
1269 env->nb_breakpoints = 0;
1270 #endif
1273 /* remove a breakpoint */
1274 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1276 #if defined(TARGET_HAS_ICE)
1277 int i;
1278 for(i = 0; i < env->nb_breakpoints; i++) {
1279 if (env->breakpoints[i] == pc)
1280 goto found;
1282 return -1;
1283 found:
1284 env->nb_breakpoints--;
1285 if (i < env->nb_breakpoints)
1286 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1288 breakpoint_invalidate(env, pc);
1289 return 0;
1290 #else
1291 return -1;
1292 #endif
1295 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1296 CPU loop after each instruction */
1297 void cpu_single_step(CPUState *env, int enabled)
1299 #if defined(TARGET_HAS_ICE)
1300 if (env->singlestep_enabled != enabled) {
1301 env->singlestep_enabled = enabled;
1302         /* must flush all the translated code to avoid inconsistencies */
1303 /* XXX: only flush what is necessary */
1304 tb_flush(env);
1306 #endif
1309 /* enable or disable low levels log */
1310 void cpu_set_log(int log_flags)
1312 loglevel = log_flags;
1313 if (loglevel && !logfile) {
1314 logfile = fopen(logfilename, log_append ? "a" : "w");
1315 if (!logfile) {
1316 perror(logfilename);
1317 _exit(1);
1319 #if !defined(CONFIG_SOFTMMU)
1320 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1322 static uint8_t logfile_buf[4096];
1323 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1325 #else
1326 setvbuf(logfile, NULL, _IOLBF, 0);
1327 #endif
1328 log_append = 1;
1330 if (!loglevel && logfile) {
1331 fclose(logfile);
1332 logfile = NULL;
1336 void cpu_set_log_filename(const char *filename)
1338 logfilename = strdup(filename);
1339 if (logfile) {
1340 fclose(logfile);
1341 logfile = NULL;
1343 cpu_set_log(loglevel);
1346 /* mask must never be zero, except for A20 change call */
1347 void cpu_interrupt(CPUState *env, int mask)
1349 #if !defined(USE_NPTL)
1350 TranslationBlock *tb;
1351 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1352 #endif
1354 /* FIXME: This is probably not threadsafe. A different thread could
1355        be in the middle of a read-modify-write operation. */
1356 env->interrupt_request |= mask;
1357 #if defined(USE_NPTL)
1358 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1359 problem and hope the cpu will stop of its own accord. For userspace
1360 emulation this often isn't actually as bad as it sounds. Often
1361 signals are used primarily to interrupt blocking syscalls. */
1362 #else
1363 /* if the cpu is currently executing code, we must unlink it and
1364 all the potentially executing TB */
1365 tb = env->current_tb;
1366 if (tb && !testandset(&interrupt_lock)) {
1367 env->current_tb = NULL;
1368 tb_reset_jump_recursive(tb);
1369 resetlock(&interrupt_lock);
1371 #endif
1374 void cpu_reset_interrupt(CPUState *env, int mask)
1376 env->interrupt_request &= ~mask;
1379 CPULogItem cpu_log_items[] = {
1380 { CPU_LOG_TB_OUT_ASM, "out_asm",
1381 "show generated host assembly code for each compiled TB" },
1382 { CPU_LOG_TB_IN_ASM, "in_asm",
1383 "show target assembly code for each compiled TB" },
1384 { CPU_LOG_TB_OP, "op",
1385 "show micro ops for each compiled TB" },
1386 { CPU_LOG_TB_OP_OPT, "op_opt",
1387 "show micro ops "
1388 #ifdef TARGET_I386
1389 "before eflags optimization and "
1390 #endif
1391 "after liveness analysis" },
1392 { CPU_LOG_INT, "int",
1393 "show interrupts/exceptions in short format" },
1394 { CPU_LOG_EXEC, "exec",
1395 "show trace before each executed TB (lots of logs)" },
1396 { CPU_LOG_TB_CPU, "cpu",
1397 "show CPU state before block translation" },
1398 #ifdef TARGET_I386
1399 { CPU_LOG_PCALL, "pcall",
1400 "show protected mode far calls/returns/exceptions" },
1401 #endif
1402 #ifdef DEBUG_IOPORT
1403 { CPU_LOG_IOPORT, "ioport",
1404 "show all i/o ports accesses" },
1405 #endif
1406 { 0, NULL, NULL },
1409 static int cmp1(const char *s1, int n, const char *s2)
1411 if (strlen(s2) != n)
1412 return 0;
1413 return memcmp(s1, s2, n) == 0;
1416 /* takes a comma separated list of log masks. Returns 0 on error. */
1417 int cpu_str_to_log_mask(const char *str)
1419 CPULogItem *item;
1420 int mask;
1421 const char *p, *p1;
1423 p = str;
1424 mask = 0;
1425 for(;;) {
1426 p1 = strchr(p, ',');
1427 if (!p1)
1428 p1 = p + strlen(p);
1429 if(cmp1(p,p1-p,"all")) {
1430 for(item = cpu_log_items; item->mask != 0; item++) {
1431 mask |= item->mask;
1433 } else {
1434 for(item = cpu_log_items; item->mask != 0; item++) {
1435 if (cmp1(p, p1 - p, item->name))
1436 goto found;
1438 return 0;
1440 found:
1441 mask |= item->mask;
1442 if (*p1 != ',')
1443 break;
1444 p = p1 + 1;
1446 return mask;
1449 void cpu_abort(CPUState *env, const char *fmt, ...)
1451 va_list ap;
1452 va_list ap2;
1454 va_start(ap, fmt);
1455 va_copy(ap2, ap);
1456 fprintf(stderr, "qemu: fatal: ");
1457 vfprintf(stderr, fmt, ap);
1458 fprintf(stderr, "\n");
1459 #ifdef TARGET_I386
1460 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1461 #else
1462 cpu_dump_state(env, stderr, fprintf, 0);
1463 #endif
1464 if (logfile) {
1465 fprintf(logfile, "qemu: fatal: ");
1466 vfprintf(logfile, fmt, ap2);
1467 fprintf(logfile, "\n");
1468 #ifdef TARGET_I386
1469 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1470 #else
1471 cpu_dump_state(env, logfile, fprintf, 0);
1472 #endif
1473 fflush(logfile);
1474 fclose(logfile);
1476 va_end(ap2);
1477 va_end(ap);
1478 abort();
1481 CPUState *cpu_copy(CPUState *env)
1483 CPUState *new_env = cpu_init(env->cpu_model_str);
1484 /* preserve chaining and index */
1485 CPUState *next_cpu = new_env->next_cpu;
1486 int cpu_index = new_env->cpu_index;
1487 memcpy(new_env, env, sizeof(CPUState));
1488 new_env->next_cpu = next_cpu;
1489 new_env->cpu_index = cpu_index;
1490 return new_env;
1493 #if !defined(CONFIG_USER_ONLY)
1495 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1497 unsigned int i;
1499 /* Discard jump cache entries for any tb which might potentially
1500 overlap the flushed page. */
1501 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1502 memset (&env->tb_jmp_cache[i], 0,
1503 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1505 i = tb_jmp_cache_hash_page(addr);
1506 memset (&env->tb_jmp_cache[i], 0,
1507 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
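    /* Two ranges are cleared because a TB never spans more than two pages, so
       any cached TB overlapping 'addr' must start either on that page or on
       the page immediately before it. */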
1510 /* NOTE: if flush_global is true, also flush global entries (not
1511 implemented yet) */
1512 void tlb_flush(CPUState *env, int flush_global)
1514 int i;
1516 #if defined(DEBUG_TLB)
1517 printf("tlb_flush:\n");
1518 #endif
1519 /* must reset current TB so that interrupts cannot modify the
1520 links while we are modifying them */
1521 env->current_tb = NULL;
1523 for(i = 0; i < CPU_TLB_SIZE; i++) {
1524 env->tlb_table[0][i].addr_read = -1;
1525 env->tlb_table[0][i].addr_write = -1;
1526 env->tlb_table[0][i].addr_code = -1;
1527 env->tlb_table[1][i].addr_read = -1;
1528 env->tlb_table[1][i].addr_write = -1;
1529 env->tlb_table[1][i].addr_code = -1;
1530 #if (NB_MMU_MODES >= 3)
1531 env->tlb_table[2][i].addr_read = -1;
1532 env->tlb_table[2][i].addr_write = -1;
1533 env->tlb_table[2][i].addr_code = -1;
1534 #if (NB_MMU_MODES == 4)
1535 env->tlb_table[3][i].addr_read = -1;
1536 env->tlb_table[3][i].addr_write = -1;
1537 env->tlb_table[3][i].addr_code = -1;
1538 #endif
1539 #endif
1542 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1544 #ifdef USE_KQEMU
1545 if (env->kqemu_enabled) {
1546 kqemu_flush(env, flush_global);
1548 #endif
1549 tlb_flush_count++;
1552 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1554 if (addr == (tlb_entry->addr_read &
1555 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1556 addr == (tlb_entry->addr_write &
1557 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1558 addr == (tlb_entry->addr_code &
1559 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1560 tlb_entry->addr_read = -1;
1561 tlb_entry->addr_write = -1;
1562 tlb_entry->addr_code = -1;
1566 void tlb_flush_page(CPUState *env, target_ulong addr)
1568 int i;
1570 #if defined(DEBUG_TLB)
1571 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1572 #endif
1573 /* must reset current TB so that interrupts cannot modify the
1574 links while we are modifying them */
1575 env->current_tb = NULL;
1577 addr &= TARGET_PAGE_MASK;
1578 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1579 tlb_flush_entry(&env->tlb_table[0][i], addr);
1580 tlb_flush_entry(&env->tlb_table[1][i], addr);
1581 #if (NB_MMU_MODES >= 3)
1582 tlb_flush_entry(&env->tlb_table[2][i], addr);
1583 #if (NB_MMU_MODES == 4)
1584 tlb_flush_entry(&env->tlb_table[3][i], addr);
1585 #endif
1586 #endif
1588 tlb_flush_jmp_cache(env, addr);
1590 #ifdef USE_KQEMU
1591 if (env->kqemu_enabled) {
1592 kqemu_flush_page(env, addr);
1594 #endif
1597 /* update the TLBs so that writes to code in the virtual page 'addr'
1598 can be detected */
1599 static void tlb_protect_code(ram_addr_t ram_addr)
1601 cpu_physical_memory_reset_dirty(ram_addr,
1602 ram_addr + TARGET_PAGE_SIZE,
1603 CODE_DIRTY_FLAG);
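/* Clearing CODE_DIRTY_FLAG makes subsequent writes to this page take the
   notdirty slow path, where tb_invalidate_phys_page_fast() can catch
   self-modifying code before the write lands. */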
1606 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1607 tested for self modifying code */
1608 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1609 target_ulong vaddr)
1611 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1614 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1615 unsigned long start, unsigned long length)
1617 unsigned long addr;
1618 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1619 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1620 if ((addr - start) < length) {
1621 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1626 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1627 int dirty_flags)
1629 CPUState *env;
1630 unsigned long length, start1;
1631 int i, mask, len;
1632 uint8_t *p;
1634 start &= TARGET_PAGE_MASK;
1635 end = TARGET_PAGE_ALIGN(end);
1637 length = end - start;
1638 if (length == 0)
1639 return;
1640 len = length >> TARGET_PAGE_BITS;
1641 #ifdef USE_KQEMU
1642 /* XXX: should not depend on cpu context */
1643 env = first_cpu;
1644 if (env->kqemu_enabled) {
1645 ram_addr_t addr;
1646 addr = start;
1647 for(i = 0; i < len; i++) {
1648 kqemu_set_notdirty(env, addr);
1649 addr += TARGET_PAGE_SIZE;
1652 #endif
1653 mask = ~dirty_flags;
1654 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1655 for(i = 0; i < len; i++)
1656 p[i] &= mask;
1658 /* we modify the TLB cache so that the dirty bit will be set again
1659 when accessing the range */
1660 start1 = start + (unsigned long)phys_ram_base;
1661 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1662 for(i = 0; i < CPU_TLB_SIZE; i++)
1663 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1664 for(i = 0; i < CPU_TLB_SIZE; i++)
1665 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1666 #if (NB_MMU_MODES >= 3)
1667 for(i = 0; i < CPU_TLB_SIZE; i++)
1668 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1669 #if (NB_MMU_MODES == 4)
1670 for(i = 0; i < CPU_TLB_SIZE; i++)
1671 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1672 #endif
1673 #endif
1677 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1679 ram_addr_t ram_addr;
1681 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1682 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1683 tlb_entry->addend - (unsigned long)phys_ram_base;
1684 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1685 tlb_entry->addr_write |= TLB_NOTDIRTY;
1690 /* update the TLB according to the current state of the dirty bits */
1691 void cpu_tlb_update_dirty(CPUState *env)
1693 int i;
1694 for(i = 0; i < CPU_TLB_SIZE; i++)
1695 tlb_update_dirty(&env->tlb_table[0][i]);
1696 for(i = 0; i < CPU_TLB_SIZE; i++)
1697 tlb_update_dirty(&env->tlb_table[1][i]);
1698 #if (NB_MMU_MODES >= 3)
1699 for(i = 0; i < CPU_TLB_SIZE; i++)
1700 tlb_update_dirty(&env->tlb_table[2][i]);
1701 #if (NB_MMU_MODES == 4)
1702 for(i = 0; i < CPU_TLB_SIZE; i++)
1703 tlb_update_dirty(&env->tlb_table[3][i]);
1704 #endif
1705 #endif
1708 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1710 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1711 tlb_entry->addr_write = vaddr;
1714 /* update the TLB corresponding to virtual page vaddr
1715 so that it is no longer dirty */
1716 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1718 int i;
1720 vaddr &= TARGET_PAGE_MASK;
1721 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1722 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1723 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1724 #if (NB_MMU_MODES >= 3)
1725 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1726 #if (NB_MMU_MODES == 4)
1727 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1728 #endif
1729 #endif
1732 /* add a new TLB entry. At most one entry for a given virtual address
1733 is permitted. Return 0 if OK or 2 if the page could not be mapped
1734 (can only happen in non SOFTMMU mode for I/O pages or pages
1735 conflicting with the host address space). */
1736 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1737 target_phys_addr_t paddr, int prot,
1738 int mmu_idx, int is_softmmu)
1740 PhysPageDesc *p;
1741 unsigned long pd;
1742 unsigned int index;
1743 target_ulong address;
1744 target_ulong code_address;
1745 target_phys_addr_t addend;
1746 int ret;
1747 CPUTLBEntry *te;
1748 int i;
1749 target_phys_addr_t iotlb;
1751 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1752 if (!p) {
1753 pd = IO_MEM_UNASSIGNED;
1754 } else {
1755 pd = p->phys_offset;
1757 #if defined(DEBUG_TLB)
1758 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1759 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1760 #endif
1762 ret = 0;
1763 address = vaddr;
1764 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1765 /* IO memory case (romd handled later) */
1766 address |= TLB_MMIO;
1768 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1769 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1770 /* Normal RAM. */
1771 iotlb = pd & TARGET_PAGE_MASK;
1772 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1773 iotlb |= IO_MEM_NOTDIRTY;
1774 else
1775 iotlb |= IO_MEM_ROM;
1776 } else {
1777         /* IO handlers are currently passed a physical address.
1778 It would be nice to pass an offset from the base address
1779 of that region. This would avoid having to special case RAM,
1780 and avoid full address decoding in every device.
1781 We can't use the high bits of pd for this because
1782 IO_MEM_ROMD uses these as a ram address. */
1783 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1786 code_address = address;
1787 /* Make accesses to pages with watchpoints go via the
1788 watchpoint trap routines. */
1789 for (i = 0; i < env->nb_watchpoints; i++) {
1790 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1791 iotlb = io_mem_watch + paddr;
1792 /* TODO: The memory case can be optimized by not trapping
1793 reads of pages with a write breakpoint. */
1794 address |= TLB_MMIO;
1798 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1799 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1800 te = &env->tlb_table[mmu_idx][index];
1801 te->addend = addend - vaddr;
1802 if (prot & PAGE_READ) {
1803 te->addr_read = address;
1804 } else {
1805 te->addr_read = -1;
1808 if (prot & PAGE_EXEC) {
1809 te->addr_code = code_address;
1810 } else {
1811 te->addr_code = -1;
1813 if (prot & PAGE_WRITE) {
1814 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1815 (pd & IO_MEM_ROMD)) {
1816 /* Write access calls the I/O callback. */
1817 te->addr_write = address | TLB_MMIO;
1818 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1819 !cpu_physical_memory_is_dirty(pd)) {
1820 te->addr_write = address | TLB_NOTDIRTY;
1821 } else {
1822 te->addr_write = address;
1824 } else {
1825 te->addr_write = -1;
1827 return ret;
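/* te->addend is chosen so that guest_vaddr + addend yields the host address
   of the backing RAM; the softmmu fast path uses it directly once the
   addr_read/addr_write/addr_code compare succeeds. */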
1830 #else
1832 void tlb_flush(CPUState *env, int flush_global)
1836 void tlb_flush_page(CPUState *env, target_ulong addr)
1840 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1841 target_phys_addr_t paddr, int prot,
1842 int mmu_idx, int is_softmmu)
1844 return 0;
1847 /* dump memory mappings */
1848 void page_dump(FILE *f)
1850 unsigned long start, end;
1851 int i, j, prot, prot1;
1852 PageDesc *p;
1854 fprintf(f, "%-8s %-8s %-8s %s\n",
1855 "start", "end", "size", "prot");
1856 start = -1;
1857 end = -1;
1858 prot = 0;
1859 for(i = 0; i <= L1_SIZE; i++) {
1860 if (i < L1_SIZE)
1861 p = l1_map[i];
1862 else
1863 p = NULL;
1864 for(j = 0;j < L2_SIZE; j++) {
1865 if (!p)
1866 prot1 = 0;
1867 else
1868 prot1 = p[j].flags;
1869 if (prot1 != prot) {
1870 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1871 if (start != -1) {
1872 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1873 start, end, end - start,
1874 prot & PAGE_READ ? 'r' : '-',
1875 prot & PAGE_WRITE ? 'w' : '-',
1876 prot & PAGE_EXEC ? 'x' : '-');
1878 if (prot1 != 0)
1879 start = end;
1880 else
1881 start = -1;
1882 prot = prot1;
1884 if (!p)
1885 break;
1890 int page_get_flags(target_ulong address)
1892 PageDesc *p;
1894 p = page_find(address >> TARGET_PAGE_BITS);
1895 if (!p)
1896 return 0;
1897 return p->flags;
1900 /* modify the flags of a page and invalidate the code if
1901    necessary. The flag PAGE_WRITE_ORG is set automatically
1902 depending on PAGE_WRITE */
1903 void page_set_flags(target_ulong start, target_ulong end, int flags)
1905 PageDesc *p;
1906 target_ulong addr;
1908 /* mmap_lock should already be held. */
1909 start = start & TARGET_PAGE_MASK;
1910 end = TARGET_PAGE_ALIGN(end);
1911 if (flags & PAGE_WRITE)
1912 flags |= PAGE_WRITE_ORG;
1913 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1914 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1915 /* if the write protection is set, then we invalidate the code
1916 inside */
1917 if (!(p->flags & PAGE_WRITE) &&
1918 (flags & PAGE_WRITE) &&
1919 p->first_tb) {
1920 tb_invalidate_phys_page(addr, 0, NULL);
1922 p->flags = flags;
1926 int page_check_range(target_ulong start, target_ulong len, int flags)
1928 PageDesc *p;
1929 target_ulong end;
1930 target_ulong addr;
1932     end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1933 start = start & TARGET_PAGE_MASK;
1935 if( end < start )
1936 /* we've wrapped around */
1937 return -1;
1938 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1939 p = page_find(addr >> TARGET_PAGE_BITS);
1940 if( !p )
1941 return -1;
1942 if( !(p->flags & PAGE_VALID) )
1943 return -1;
1945 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1946 return -1;
1947 if (flags & PAGE_WRITE) {
1948 if (!(p->flags & PAGE_WRITE_ORG))
1949 return -1;
1950 /* unprotect the page if it was put read-only because it
1951 contains translated code */
1952 if (!(p->flags & PAGE_WRITE)) {
1953 if (!page_unprotect(addr, 0, NULL))
1954 return -1;
1956 return 0;
1959 return 0;
1962 /* called from signal handler: invalidate the code and unprotect the
1963    page. Return TRUE if the fault was successfully handled. */
1964 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1966 unsigned int page_index, prot, pindex;
1967 PageDesc *p, *p1;
1968 target_ulong host_start, host_end, addr;
1970 /* Technically this isn't safe inside a signal handler. However we
1971 know this only ever happens in a synchronous SEGV handler, so in
1972 practice it seems to be ok. */
1973 mmap_lock();
1975 host_start = address & qemu_host_page_mask;
1976 page_index = host_start >> TARGET_PAGE_BITS;
1977 p1 = page_find(page_index);
1978 if (!p1) {
1979 mmap_unlock();
1980 return 0;
1982 host_end = host_start + qemu_host_page_size;
1983 p = p1;
1984 prot = 0;
1985 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1986 prot |= p->flags;
1987 p++;
1989 /* if the page was really writable, then we change its
1990 protection back to writable */
1991 if (prot & PAGE_WRITE_ORG) {
1992 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1993 if (!(p1[pindex].flags & PAGE_WRITE)) {
1994 mprotect((void *)g2h(host_start), qemu_host_page_size,
1995 (prot & PAGE_BITS) | PAGE_WRITE);
1996 p1[pindex].flags |= PAGE_WRITE;
1997 /* and since the content will be modified, we must invalidate
1998 the corresponding translated code. */
1999 tb_invalidate_phys_page(address, pc, puc);
2000 #ifdef DEBUG_TB_CHECK
2001 tb_invalidate_check(address);
2002 #endif
2003 mmap_unlock();
2004 return 1;
2007 mmap_unlock();
2008 return 0;
2011 static inline void tlb_set_dirty(CPUState *env,
2012 unsigned long addr, target_ulong vaddr)
2015 #endif /* defined(CONFIG_USER_ONLY) */
2017 #if !defined(CONFIG_USER_ONLY)
2018 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2019 ram_addr_t memory);
2020 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2021 ram_addr_t orig_memory);
2022 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2023 need_subpage) \
2024 do { \
2025 if (addr > start_addr) \
2026 start_addr2 = 0; \
2027 else { \
2028 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2029 if (start_addr2 > 0) \
2030 need_subpage = 1; \
2033 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2034 end_addr2 = TARGET_PAGE_SIZE - 1; \
2035 else { \
2036 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2037 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2038 need_subpage = 1; \
2040 } while (0)
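/* CHECK_SUBPAGE computes the first and last byte offsets (start_addr2,
   end_addr2) of the registered region within the target page at 'addr' and
   sets need_subpage when the region does not cover the whole page; such
   pages are handled through a subpage_t that dispatches by offset within
   the page. */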
2042 /* register physical memory. 'size' must be a multiple of the target
2043 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2044 io memory page */
2045 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2046 ram_addr_t size,
2047 ram_addr_t phys_offset)
2049 target_phys_addr_t addr, end_addr;
2050 PhysPageDesc *p;
2051 CPUState *env;
2052 ram_addr_t orig_size = size;
2053 void *subpage;
2055 #ifdef USE_KQEMU
2056 /* XXX: should not depend on cpu context */
2057 env = first_cpu;
2058 if (env->kqemu_enabled) {
2059 kqemu_set_phys_mem(start_addr, size, phys_offset);
2061 #endif
2062 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2063 end_addr = start_addr + (target_phys_addr_t)size;
2064 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2065 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2066 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2067 ram_addr_t orig_memory = p->phys_offset;
2068 target_phys_addr_t start_addr2, end_addr2;
2069 int need_subpage = 0;
2071 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2072 need_subpage);
2073 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2074 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2075 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2076 &p->phys_offset, orig_memory);
2077 } else {
2078 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2079 >> IO_MEM_SHIFT];
2081 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2082 } else {
2083 p->phys_offset = phys_offset;
2084 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2085 (phys_offset & IO_MEM_ROMD))
2086 phys_offset += TARGET_PAGE_SIZE;
2088 } else {
2089 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2090 p->phys_offset = phys_offset;
2091 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2092 (phys_offset & IO_MEM_ROMD))
2093 phys_offset += TARGET_PAGE_SIZE;
2094 else {
2095 target_phys_addr_t start_addr2, end_addr2;
2096 int need_subpage = 0;
2098 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2099 end_addr2, need_subpage);
2101 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2102 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2103 &p->phys_offset, IO_MEM_UNASSIGNED);
2104 subpage_register(subpage, start_addr2, end_addr2,
2105 phys_offset);
2111 /* since each CPU stores ram addresses in its TLB cache, we must
2112 reset the modified entries */
2113 /* XXX: slow ! */
2114 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2115 tlb_flush(env, 1);
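/* Typical (illustrative) use from board initialization code: RAM is
allocated with qemu_ram_alloc() and mapped directly, while MMIO regions are
mapped using an index obtained from cpu_register_io_memory(), e.g.:

       ram_addr_t ram_offset = qemu_ram_alloc(0x04000000);
       cpu_register_physical_memory(0x00000000, 0x04000000,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0x10000000, 0x1000, my_io_index);

where the addresses, sizes and 'my_io_index' are placeholders. */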
2119 /* XXX: temporary until new memory mapping API */
2120 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2122 PhysPageDesc *p;
2124 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2125 if (!p)
2126 return IO_MEM_UNASSIGNED;
2127 return p->phys_offset;
2130 /* XXX: better than nothing */
2131 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2133 ram_addr_t addr;
2134 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2135 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2136 (uint64_t)size, (uint64_t)phys_ram_size);
2137 abort();
2139 addr = phys_ram_alloc_offset;
2140 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2141 return addr;
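/* qemu_ram_alloc() is a simple bump allocator over the preallocated
phys_ram_base area: it returns the current offset, advances the allocation
cursor to the next page-aligned position and aborts once the configured RAM
size is exhausted. qemu_ram_free() below is a no-op. */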
2144 void qemu_ram_free(ram_addr_t addr)
2148 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2150 #ifdef DEBUG_UNASSIGNED
2151 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2152 #endif
2153 #ifdef TARGET_SPARC
2154 do_unassigned_access(addr, 0, 0, 0);
2155 #elif defined(TARGET_CRIS)
2156 do_unassigned_access(addr, 0, 0, 0);
2157 #endif
2158 return 0;
2161 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2163 #ifdef DEBUG_UNASSIGNED
2164 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2165 #endif
2166 #ifdef TARGET_SPARC
2167 do_unassigned_access(addr, 1, 0, 0);
2168 #elif defined(TARGET_CRIS)
2169 do_unassigned_access(addr, 1, 0, 0);
2170 #endif
2173 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2174 unassigned_mem_readb,
2175 unassigned_mem_readb,
2176 unassigned_mem_readb,
2179 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2180 unassigned_mem_writeb,
2181 unassigned_mem_writeb,
2182 unassigned_mem_writeb,
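/* The "not dirty" write handlers below implement the slow path for stores
to RAM pages that still contain translated code: they invalidate the
affected translations, perform the store on phys_ram_base, update the
per-page dirty flags and, once no translated code remains on the page
(dirty_flags == 0xff), let tlb_set_dirty() switch the TLB entry back to the
fast direct-RAM path. */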
2185 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2186 uint32_t val)
2188 int dirty_flags;
2189 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2190 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2191 #if !defined(CONFIG_USER_ONLY)
2192 tb_invalidate_phys_page_fast(ram_addr, 1);
2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2194 #endif
2196 stb_p(phys_ram_base + ram_addr, val);
2197 #ifdef USE_KQEMU
2198 if (cpu_single_env->kqemu_enabled &&
2199 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2200 kqemu_modify_page(cpu_single_env, ram_addr);
2201 #endif
2202 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2203 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2204 /* we remove the notdirty callback only if the code has been
2205 flushed */
2206 if (dirty_flags == 0xff)
2207 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2210 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2211 uint32_t val)
2213 int dirty_flags;
2214 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2215 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2216 #if !defined(CONFIG_USER_ONLY)
2217 tb_invalidate_phys_page_fast(ram_addr, 2);
2218 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2219 #endif
2221 stw_p(phys_ram_base + ram_addr, val);
2222 #ifdef USE_KQEMU
2223 if (cpu_single_env->kqemu_enabled &&
2224 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2225 kqemu_modify_page(cpu_single_env, ram_addr);
2226 #endif
2227 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2228 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2229 /* we remove the notdirty callback only if the code has been
2230 flushed */
2231 if (dirty_flags == 0xff)
2232 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2235 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2236 uint32_t val)
2238 int dirty_flags;
2239 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2240 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2241 #if !defined(CONFIG_USER_ONLY)
2242 tb_invalidate_phys_page_fast(ram_addr, 4);
2243 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2244 #endif
2246 stl_p(phys_ram_base + ram_addr, val);
2247 #ifdef USE_KQEMU
2248 if (cpu_single_env->kqemu_enabled &&
2249 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2250 kqemu_modify_page(cpu_single_env, ram_addr);
2251 #endif
2252 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2253 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2254 /* we remove the notdirty callback only if the code has been
2255 flushed */
2256 if (dirty_flags == 0xff)
2257 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2260 static CPUReadMemoryFunc *error_mem_read[3] = {
2261 NULL, /* never used */
2262 NULL, /* never used */
2263 NULL, /* never used */
2266 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2267 notdirty_mem_writeb,
2268 notdirty_mem_writew,
2269 notdirty_mem_writel,
2272 /* Generate a debug exception if a watchpoint has been hit. */
2273 static void check_watchpoint(int offset, int flags)
2275 CPUState *env = cpu_single_env;
2276 target_ulong vaddr;
2277 int i;
2279 vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2280 for (i = 0; i < env->nb_watchpoints; i++) {
2281 if (vaddr == env->watchpoint[i].vaddr
2282 && (env->watchpoint[i].type & flags)) {
2283 env->watchpoint_hit = i + 1;
2284 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2285 break;
2290 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2291 so these check for a hit then pass through to the normal out-of-line
2292 phys routines. */
2293 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2295 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2296 return ldub_phys(addr);
2299 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2301 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2302 return lduw_phys(addr);
2305 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2307 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2308 return ldl_phys(addr);
2311 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2312 uint32_t val)
2314 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2315 stb_phys(addr, val);
2318 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2319 uint32_t val)
2321 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2322 stw_phys(addr, val);
2325 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2326 uint32_t val)
2328 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2329 stl_phys(addr, val);
2332 static CPUReadMemoryFunc *watch_mem_read[3] = {
2333 watch_mem_readb,
2334 watch_mem_readw,
2335 watch_mem_readl,
2338 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2339 watch_mem_writeb,
2340 watch_mem_writew,
2341 watch_mem_writel,
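/* Subpage support: when memory regions with different handlers share a
single target page (e.g. a small MMIO range that is not page aligned), the
page is backed by a subpage_t whose per-offset tables, filled in by
subpage_register(), forward each access to the handler registered for that
particular sub-range. */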
2344 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2345 unsigned int len)
2347 uint32_t ret;
2348 unsigned int idx;
2350 idx = SUBPAGE_IDX(addr - mmio->base);
2351 #if defined(DEBUG_SUBPAGE)
2352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2353 mmio, len, addr, idx);
2354 #endif
2355 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2357 return ret;
2360 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2361 uint32_t value, unsigned int len)
2363 unsigned int idx;
2365 idx = SUBPAGE_IDX(addr - mmio->base);
2366 #if defined(DEBUG_SUBPAGE)
2367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2368 mmio, len, addr, idx, value);
2369 #endif
2370 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2373 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377 #endif
2379 return subpage_readlen(opaque, addr, 0);
2382 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2383 uint32_t value)
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387 #endif
2388 subpage_writelen(opaque, addr, value, 0);
2391 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2393 #if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395 #endif
2397 return subpage_readlen(opaque, addr, 1);
2400 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2401 uint32_t value)
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405 #endif
2406 subpage_writelen(opaque, addr, value, 1);
2409 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2411 #if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2413 #endif
2415 return subpage_readlen(opaque, addr, 2);
2418 static void subpage_writel (void *opaque,
2419 target_phys_addr_t addr, uint32_t value)
2421 #if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2423 #endif
2424 subpage_writelen(opaque, addr, value, 2);
2427 static CPUReadMemoryFunc *subpage_read[] = {
2428 &subpage_readb,
2429 &subpage_readw,
2430 &subpage_readl,
2433 static CPUWriteMemoryFunc *subpage_write[] = {
2434 &subpage_writeb,
2435 &subpage_writew,
2436 &subpage_writel,
2439 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2440 ram_addr_t memory)
2442 int idx, eidx;
2443 unsigned int i;
2445 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2446 return -1;
2447 idx = SUBPAGE_IDX(start);
2448 eidx = SUBPAGE_IDX(end);
2449 #if defined(DEBUG_SUBPAGE)
2450 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2451 mmio, start, end, idx, eidx, memory);
2452 #endif
2453 memory >>= IO_MEM_SHIFT;
2454 for (; idx <= eidx; idx++) {
2455 for (i = 0; i < 4; i++) {
2456 if (io_mem_read[memory][i]) {
2457 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2458 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2460 if (io_mem_write[memory][i]) {
2461 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2462 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2467 return 0;
2470 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2471 ram_addr_t orig_memory)
2473 subpage_t *mmio;
2474 int subpage_memory;
2476 mmio = qemu_mallocz(sizeof(subpage_t));
2477 if (mmio != NULL) {
2478 mmio->base = base;
2479 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2482 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2483 #endif
2484 *phys = subpage_memory | IO_MEM_SUBPAGE;
2485 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2488 return mmio;
2491 static void io_mem_init(void)
2493 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2494 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2495 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2496 io_mem_nb = 5;
2498 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2499 watch_mem_write, NULL);
2500 /* alloc dirty bits array */
2501 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2502 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
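/* io_mem_init() installs the built-in handlers for ROM, unassigned and
not-dirty accesses at their fixed indices, registers a dynamic slot used for
pages containing watchpoints, and allocates the dirty bitmap (one byte of
flags per target page, initially all dirty). */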
2505 /* mem_read and mem_write are arrays of functions containing the
2506 function to access byte (index 0), word (index 1) and dword (index
2507 2). Functions can be omitted with a NULL function pointer. The
2508 registered functions may be modified dynamically later.
2509 If io_index is non-zero, the corresponding I/O zone is
2510 modified. If it is zero, a new I/O zone is allocated. The return
2511 value can be used with cpu_register_physical_memory(). -1 is
2512 returned on error. */
2513 int cpu_register_io_memory(int io_index,
2514 CPUReadMemoryFunc **mem_read,
2515 CPUWriteMemoryFunc **mem_write,
2516 void *opaque)
2518 int i, subwidth = 0;
2520 if (io_index <= 0) {
2521 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2522 return -1;
2523 io_index = io_mem_nb++;
2524 } else {
2525 if (io_index >= IO_MEM_NB_ENTRIES)
2526 return -1;
2529 for(i = 0;i < 3; i++) {
2530 if (!mem_read[i] || !mem_write[i])
2531 subwidth = IO_MEM_SUBWIDTH;
2532 io_mem_read[io_index][i] = mem_read[i];
2533 io_mem_write[io_index][i] = mem_write[i];
2535 io_mem_opaque[io_index] = opaque;
2536 return (io_index << IO_MEM_SHIFT) | subwidth;
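/* Illustrative registration of a byte/word/long-capable MMIO region (all
names below are placeholders):

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(base, 0x1000, io);
*/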
2539 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2541 return io_mem_write[io_index >> IO_MEM_SHIFT];
2544 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2546 return io_mem_read[io_index >> IO_MEM_SHIFT];
2549 #endif /* !defined(CONFIG_USER_ONLY) */
2551 /* physical memory access (slow version, mainly for debug) */
2552 #if defined(CONFIG_USER_ONLY)
2553 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2554 int len, int is_write)
2556 int l, flags;
2557 target_ulong page;
2558 void * p;
2560 while (len > 0) {
2561 page = addr & TARGET_PAGE_MASK;
2562 l = (page + TARGET_PAGE_SIZE) - addr;
2563 if (l > len)
2564 l = len;
2565 flags = page_get_flags(page);
2566 if (!(flags & PAGE_VALID))
2567 return;
2568 if (is_write) {
2569 if (!(flags & PAGE_WRITE))
2570 return;
2571 /* XXX: this code should not depend on lock_user */
2572 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2573 /* FIXME - should this return an error rather than just fail? */
2574 return;
2575 memcpy(p, buf, l);
2576 unlock_user(p, addr, l);
2577 } else {
2578 if (!(flags & PAGE_READ))
2579 return;
2580 /* XXX: this code should not depend on lock_user */
2581 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2582 /* FIXME - should this return an error rather than just fail? */
2583 return;
2584 memcpy(buf, p, l);
2585 unlock_user(p, addr, 0);
2587 len -= l;
2588 buf += l;
2589 addr += l;
2593 #else
2594 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2595 int len, int is_write)
2597 int l, io_index;
2598 uint8_t *ptr;
2599 uint32_t val;
2600 target_phys_addr_t page;
2601 unsigned long pd;
2602 PhysPageDesc *p;
2604 while (len > 0) {
2605 page = addr & TARGET_PAGE_MASK;
2606 l = (page + TARGET_PAGE_SIZE) - addr;
2607 if (l > len)
2608 l = len;
2609 p = phys_page_find(page >> TARGET_PAGE_BITS);
2610 if (!p) {
2611 pd = IO_MEM_UNASSIGNED;
2612 } else {
2613 pd = p->phys_offset;
2616 if (is_write) {
2617 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2618 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2619 /* XXX: could force cpu_single_env to NULL to avoid
2620 potential bugs */
2621 if (l >= 4 && ((addr & 3) == 0)) {
2622 /* 32 bit write access */
2623 val = ldl_p(buf);
2624 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2625 l = 4;
2626 } else if (l >= 2 && ((addr & 1) == 0)) {
2627 /* 16 bit write access */
2628 val = lduw_p(buf);
2629 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2630 l = 2;
2631 } else {
2632 /* 8 bit write access */
2633 val = ldub_p(buf);
2634 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2635 l = 1;
2637 } else {
2638 unsigned long addr1;
2639 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2640 /* RAM case */
2641 ptr = phys_ram_base + addr1;
2642 memcpy(ptr, buf, l);
2643 if (!cpu_physical_memory_is_dirty(addr1)) {
2644 /* invalidate code */
2645 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2646 /* set dirty bit */
2647 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2648 (0xff & ~CODE_DIRTY_FLAG);
2651 } else {
2652 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2653 !(pd & IO_MEM_ROMD)) {
2654 /* I/O case */
2655 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2656 if (l >= 4 && ((addr & 3) == 0)) {
2657 /* 32 bit read access */
2658 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2659 stl_p(buf, val);
2660 l = 4;
2661 } else if (l >= 2 && ((addr & 1) == 0)) {
2662 /* 16 bit read access */
2663 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2664 stw_p(buf, val);
2665 l = 2;
2666 } else {
2667 /* 8 bit read access */
2668 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2669 stb_p(buf, val);
2670 l = 1;
2672 } else {
2673 /* RAM case */
2674 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2675 (addr & ~TARGET_PAGE_MASK);
2676 memcpy(buf, ptr, l);
2679 len -= l;
2680 buf += l;
2681 addr += l;
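/* For MMIO the transfer above is split into the widest naturally aligned
accesses available (32, 16 or 8 bit) and routed through io_mem_read /
io_mem_write; for RAM the data is copied directly through phys_ram_base,
invalidating translated code and setting the dirty bits on writes. */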
2685 /* used for ROM loading: can write to RAM and ROM */
2686 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2687 const uint8_t *buf, int len)
2689 int l;
2690 uint8_t *ptr;
2691 target_phys_addr_t page;
2692 unsigned long pd;
2693 PhysPageDesc *p;
2695 while (len > 0) {
2696 page = addr & TARGET_PAGE_MASK;
2697 l = (page + TARGET_PAGE_SIZE) - addr;
2698 if (l > len)
2699 l = len;
2700 p = phys_page_find(page >> TARGET_PAGE_BITS);
2701 if (!p) {
2702 pd = IO_MEM_UNASSIGNED;
2703 } else {
2704 pd = p->phys_offset;
2707 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2708 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2709 !(pd & IO_MEM_ROMD)) {
2710 /* do nothing */
2711 } else {
2712 unsigned long addr1;
2713 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2714 /* ROM/RAM case */
2715 ptr = phys_ram_base + addr1;
2716 memcpy(ptr, buf, l);
2718 len -= l;
2719 buf += l;
2720 addr += l;
2725 /* warning: addr must be aligned */
2726 uint32_t ldl_phys(target_phys_addr_t addr)
2728 int io_index;
2729 uint8_t *ptr;
2730 uint32_t val;
2731 unsigned long pd;
2732 PhysPageDesc *p;
2734 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2735 if (!p) {
2736 pd = IO_MEM_UNASSIGNED;
2737 } else {
2738 pd = p->phys_offset;
2741 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2742 !(pd & IO_MEM_ROMD)) {
2743 /* I/O case */
2744 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2745 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2746 } else {
2747 /* RAM case */
2748 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2749 (addr & ~TARGET_PAGE_MASK);
2750 val = ldl_p(ptr);
2752 return val;
2755 /* warning: addr must be aligned */
2756 uint64_t ldq_phys(target_phys_addr_t addr)
2758 int io_index;
2759 uint8_t *ptr;
2760 uint64_t val;
2761 unsigned long pd;
2762 PhysPageDesc *p;
2764 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2765 if (!p) {
2766 pd = IO_MEM_UNASSIGNED;
2767 } else {
2768 pd = p->phys_offset;
2771 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2772 !(pd & IO_MEM_ROMD)) {
2773 /* I/O case */
2774 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2775 #ifdef TARGET_WORDS_BIGENDIAN
2776 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2777 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2778 #else
2779 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2780 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2781 #endif
2782 } else {
2783 /* RAM case */
2784 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2785 (addr & ~TARGET_PAGE_MASK);
2786 val = ldq_p(ptr);
2788 return val;
2791 /* XXX: optimize */
2792 uint32_t ldub_phys(target_phys_addr_t addr)
2794 uint8_t val;
2795 cpu_physical_memory_read(addr, &val, 1);
2796 return val;
2799 /* XXX: optimize */
2800 uint32_t lduw_phys(target_phys_addr_t addr)
2802 uint16_t val;
2803 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2804 return tswap16(val);
2807 /* warning: addr must be aligned. The RAM page is not marked as dirty
2808 and the code inside is not invalidated. This is useful if the dirty
2809 bits are used to track modified PTEs */
2810 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2812 int io_index;
2813 uint8_t *ptr;
2814 unsigned long pd;
2815 PhysPageDesc *p;
2817 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2818 if (!p) {
2819 pd = IO_MEM_UNASSIGNED;
2820 } else {
2821 pd = p->phys_offset;
2824 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2825 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2827 } else {
2828 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2829 (addr & ~TARGET_PAGE_MASK);
2830 stl_p(ptr, val);
2834 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2836 int io_index;
2837 uint8_t *ptr;
2838 unsigned long pd;
2839 PhysPageDesc *p;
2841 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2842 if (!p) {
2843 pd = IO_MEM_UNASSIGNED;
2844 } else {
2845 pd = p->phys_offset;
2848 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2849 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2850 #ifdef TARGET_WORDS_BIGENDIAN
2851 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2852 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2853 #else
2854 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2855 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2856 #endif
2857 } else {
2858 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2859 (addr & ~TARGET_PAGE_MASK);
2860 stq_p(ptr, val);
2864 /* warning: addr must be aligned */
2865 void stl_phys(target_phys_addr_t addr, uint32_t val)
2867 int io_index;
2868 uint8_t *ptr;
2869 unsigned long pd;
2870 PhysPageDesc *p;
2872 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2873 if (!p) {
2874 pd = IO_MEM_UNASSIGNED;
2875 } else {
2876 pd = p->phys_offset;
2879 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2880 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2881 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2882 } else {
2883 unsigned long addr1;
2884 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2885 /* RAM case */
2886 ptr = phys_ram_base + addr1;
2887 stl_p(ptr, val);
2888 if (!cpu_physical_memory_is_dirty(addr1)) {
2889 /* invalidate code */
2890 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2891 /* set dirty bit */
2892 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2893 (0xff & ~CODE_DIRTY_FLAG);
2898 /* XXX: optimize */
2899 void stb_phys(target_phys_addr_t addr, uint32_t val)
2901 uint8_t v = val;
2902 cpu_physical_memory_write(addr, &v, 1);
2905 /* XXX: optimize */
2906 void stw_phys(target_phys_addr_t addr, uint32_t val)
2908 uint16_t v = tswap16(val);
2909 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2912 /* XXX: optimize */
2913 void stq_phys(target_phys_addr_t addr, uint64_t val)
2915 val = tswap64(val);
2916 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2919 #endif
2921 /* virtual memory access for debug */
2922 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2923 uint8_t *buf, int len, int is_write)
2925 int l;
2926 target_phys_addr_t phys_addr;
2927 target_ulong page;
2929 while (len > 0) {
2930 page = addr & TARGET_PAGE_MASK;
2931 phys_addr = cpu_get_phys_page_debug(env, page);
2932 /* if no physical page mapped, return an error */
2933 if (phys_addr == -1)
2934 return -1;
2935 l = (page + TARGET_PAGE_SIZE) - addr;
2936 if (l > len)
2937 l = len;
2938 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2939 buf, l, is_write);
2940 len -= l;
2941 buf += l;
2942 addr += l;
2944 return 0;
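/* cpu_memory_rw_debug() works on guest virtual addresses: each page is
translated with cpu_get_phys_page_debug() and the data is then copied with
cpu_physical_memory_rw(). It is intended for debugger accesses, e.g. from
the gdb stub. */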
2947 void dump_exec_info(FILE *f,
2948 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2950 int i, target_code_size, max_target_code_size;
2951 int direct_jmp_count, direct_jmp2_count, cross_page;
2952 TranslationBlock *tb;
2954 target_code_size = 0;
2955 max_target_code_size = 0;
2956 cross_page = 0;
2957 direct_jmp_count = 0;
2958 direct_jmp2_count = 0;
2959 for(i = 0; i < nb_tbs; i++) {
2960 tb = &tbs[i];
2961 target_code_size += tb->size;
2962 if (tb->size > max_target_code_size)
2963 max_target_code_size = tb->size;
2964 if (tb->page_addr[1] != -1)
2965 cross_page++;
2966 if (tb->tb_next_offset[0] != 0xffff) {
2967 direct_jmp_count++;
2968 if (tb->tb_next_offset[1] != 0xffff) {
2969 direct_jmp2_count++;
2973 /* XXX: avoid using doubles ? */
2974 cpu_fprintf(f, "Translation buffer state:\n");
2975 cpu_fprintf(f, "gen code size %ld/%ld\n",
2976 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
2977 cpu_fprintf(f, "TB count %d/%d\n",
2978 nb_tbs, code_gen_max_blocks);
2979 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2980 nb_tbs ? target_code_size / nb_tbs : 0,
2981 max_target_code_size);
2982 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2983 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2984 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2985 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2986 cross_page,
2987 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2988 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2989 direct_jmp_count,
2990 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2991 direct_jmp2_count,
2992 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2993 cpu_fprintf(f, "\nStatistics:\n");
2994 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2995 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2996 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2997 tcg_dump_info(f, cpu_fprintf);
3000 #if !defined(CONFIG_USER_ONLY)
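/* With SOFTMMU_CODE_ACCESS defined and MMUSUFFIX set to _cmmu, the four
inclusions of softmmu_template.h below instantiate the 8/16/32/64 bit
code-fetch load helpers (one per SHIFT value) that the translator uses to
read guest instructions through the soft MMU. */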
3002 #define MMUSUFFIX _cmmu
3003 #define GETPC() NULL
3004 #define env cpu_single_env
3005 #define SOFTMMU_CODE_ACCESS
3007 #define SHIFT 0
3008 #include "softmmu_template.h"
3010 #define SHIFT 1
3011 #include "softmmu_template.h"
3013 #define SHIFT 2
3014 #include "softmmu_template.h"
3016 #define SHIFT 3
3017 #include "softmmu_template.h"
3019 #undef env
3021 #endif