/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"
/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
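
/* Illustrative usage sketch (not part of the original header): tswap32()
 * byte-swaps only when host and target endianness differ, so the same call
 * converts a value in either direction between host and guest order.  The
 * helper name below is hypothetical. */
#if 0 /* example only */
static inline uint32_t example_guest_to_host_u32(uint32_t guest_val)
{
    return tswap32(guest_val); /* identity when endianness already matches */
}
#endif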
/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

#if defined(FLOATX80)
typedef union {
    floatx80 d;
    struct {
        uint64_t lower;
        uint16_t upper;
    } l;
} CPU_LDoubleU;
#endif

typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t highest;
        uint32_t higher;
        uint32_t high;
        uint32_t low;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t low;
        uint32_t high;
        uint32_t higher;
        uint32_t highest;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
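
/* Illustrative expansion of the naming scheme above (not part of the
 * original header): ldsw_le_p() is a signed ("s") 16-bit ("w") little-endian
 * ("le") load through a host pointer, and stfq_be_p() stores a float ("f")
 * 64-bit ("q") value big endian.  The helper below is hypothetical. */
#if 0 /* example only */
static inline int example_load_s16le(const void *host_ptr)
{
    return ldsw_le_p(host_ptr); /* returns the sign-extended 16-bit value */
}
#endif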
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}
/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
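
/* Illustrative sketch (not part of the original header): code that must
 * honour guest byte order uses the unsuffixed accessors, which resolve to
 * the _le or _be variant at compile time.  The helper name and stored value
 * are assumptions for the example. */
#if 0 /* example only */
static inline void example_target_endian_store(void *host_ptr)
{
    stl_p(host_ptr, 0x12345678);
    /* On a big-endian guest the bytes are 12 34 56 78; on a
       little-endian guest they are 78 56 34 12. */
}
#endif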
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
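
/* Illustrative sketch (not part of the original header): in user-mode
 * emulation a guest virtual address is converted to a host pointer with
 * g2h() before using the _p accessors; h2g() is the checked inverse.
 * The helper name is hypothetical. */
#if 0 /* example only */
static inline uint32_t example_read_guest_u32(abi_ulong guest_addr)
{
    void *host = g2h(guest_addr);   /* guest address -> host pointer */
    return ldl_p(host);             /* target-endian 32-bit load */
}
#endif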
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
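
/* Worked example (illustrative, not part of the original header): with
 * TARGET_PAGE_BITS == 12 (4 KiB pages), TARGET_PAGE_SIZE is 0x1000 and
 * TARGET_PAGE_MASK is ~0xfff, so (0x12345 & TARGET_PAGE_MASK) == 0x12000
 * (round down) and TARGET_PAGE_ALIGN(0x12345) == 0x13000 (round up). */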
/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif
CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

#define CPU_DUMP_CODE 0x00010000

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0400
#define CPU_INTERRUPT_TGT_INT_2   0x0800

/* First unused bit: 0x2000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
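
/* Illustrative sketch (an assumption about a typical target, not part of
 * this header): each target/cpu.h aliases the generic bits to meaningful
 * names, e.g. an ARM-style target might map its FIQ input onto one of the
 * TGT_EXT bits. */
#if 0 /* example only; actual definitions live in target-xxx/cpu.h */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
#endif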
#ifndef CONFIG_USER_ONLY
typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

static inline void cpu_interrupt(CPUState *s, int mask)
{
    cpu_interrupt_handler(s, mask);
}
#else /* USER_ONLY */
void cpu_interrupt(CPUState *env, int mask);
#endif /* USER_ONLY */
void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
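
/* Usage sketch (illustrative, not part of the original header): installing
 * a breakpoint and a 4-byte write watchpoint, then removing them by
 * reference.  The flag choice (BP_GDB) and helper name are assumptions for
 * the example. */
#if 0 /* example only */
static void example_debug_setup(CPUState *env, target_ulong pc,
                                target_ulong data_addr)
{
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;

    cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
    cpu_watchpoint_insert(env, data_addr, 4, BP_GDB | BP_MEM_WRITE, &wp);

    cpu_breakpoint_remove_by_ref(env, bp);
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif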
#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
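
/* Usage sketch (illustrative, not part of the original header): enabling
 * execution and interrupt logging programmatically, roughly what the
 * "-d exec,int" command line option does via cpu_str_to_log_mask().  The
 * log file path and helper name are assumptions for the example. */
#if 0 /* example only */
static void example_enable_logging(void)
{
    cpu_set_log_filename("/tmp/qemu.log");
    cpu_set_log(CPU_LOG_EXEC | CPU_LOG_INT);
}
#endif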
#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

extern int phys_ram_fd;
extern ram_addr_t ram_size;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK (1 << 0)

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    uint8_t *phys_dirty;
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;
/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in iotlb entry,
   so only a limited number of ids are available.  */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
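
/* Usage sketch (illustrative, not part of the original header): the dirty
 * bitmap keeps one byte per target page, and a page counts as fully dirty
 * when the byte is 0xff.  Masking out only the VGA bit leaves the CODE and
 * MIGRATION bits intact.  The helper name is hypothetical. */
#if 0 /* example only */
static void example_clear_vga_dirty(ram_addr_t start, int length)
{
    cpu_physical_memory_mask_dirty_range(start, length, VGA_DIRTY_FLAG);
}
#endif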
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size);

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size);

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */