/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif
/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)

/* host and target differ in endianness: every access byte-swaps */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* host and target share endianness: the swaps are no-ops */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
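
/* Usage sketch (illustrative, not part of the original header): the
   tswap*() functions convert between target and host byte order, so they
   compile to byte swaps only on cross-endian builds; tswapl() picks the
   variant matching TARGET_LONG_SIZE:

       uint32_t v = tswap32(0x12345678);
       // little-endian host emulating a big-endian target: v == 0x78563412
       // host and target share endianness:                 v == 0x12345678
*/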
/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian order even when the CPU is little endian */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
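
/* Examples of the syntax above (illustrative, not part of the original
 * header; the _p functions below take a host pointer directly):
 *
 *   ldub_raw(p)      : load unsigned 8 bit integer, raw host access
 *   ldsw_le_p(p)     : load signed 16 bit little endian value
 *   ldq_be_p(p)      : load unsigned 64 bit big endian value
 *   stfl_p(p, v)     : store 32 bit float in target cpu endianness
 *   stl_kernel(p, v) : store 32 bit integer via the soft MMU in kernel mode
 */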
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (uint16_t)val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
static inline int ldsw_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}
static inline int ldl_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}
static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}
static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}
static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}
static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}
static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}
static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}
static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}
static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}
static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}
static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}
/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}
static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
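
/* Usage sketch (illustrative, not part of the original header): the
   dispatch above is resolved at compile time per target, so for a
   big-endian target:

       uint32_t insn = ldl_p(code_ptr);  // expands to ldl_be_p(code_ptr)
       stw_p(buf, 0x1234);               // expands to stw_be_p(buf, 0x1234)

   and the same caller works unchanged for a little-endian target. */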
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0ul

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
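
/* Usage sketch (illustrative, not part of the original header): g2h()
   turns a guest virtual address into a host pointer by adding GUEST_BASE,
   and h2g() goes back, asserting the result fits the target address
   space:

       uint8_t *host = g2h(guest_addr);   // guest address -> host pointer
       if (h2g_valid(host))
           guest_addr = h2g(host);        // host pointer -> guest address
*/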
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
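
/* Worked example (illustrative, not part of the original header): with
   TARGET_PAGE_BITS == 12, TARGET_PAGE_SIZE is 0x1000 and TARGET_PAGE_MASK
   is ~0xfff, so:

       TARGET_PAGE_ALIGN(0x12345) == 0x13000    // round up to next page
       (0x12345 & TARGET_PAGE_MASK) == 0x12000  // round down to page start
*/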
/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020
void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)))
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;
#define CPU_INTERRUPT_EXIT   0x01  /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02  /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04  /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08  /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10  /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20  /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40  /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80  /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
#define SSTEP_ENABLE  0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS

void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif
/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_SHIFT       3
#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)
#define IO_MEM_SUBWIDTH    (4)
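
/* Illustrative sketch (an assumption, not spelled out in this header):
   a page's io "phys_offset" value combines an index obtained from
   cpu_register_io_memory() (already shifted by IO_MEM_SHIFT) with the
   flag bits above, e.g.:

       io_index | IO_MEM_ROMD   // reads served from RAM, writes trap
                                // to the device callbacks
*/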
/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY     (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO         (1 << 5)
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset);
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}
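
/* Usage sketch (illustrative, not part of the original header): typical
   board code allocates RAM and then maps it at a guest physical address:

       ram_addr_t ram_offset = qemu_ram_alloc(0x100000);  // 1 MB of RAM
       cpu_register_physical_memory(0x08000000, 0x100000,
                                    ram_offset | IO_MEM_RAM);
*/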
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
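
/* Usage sketch (illustrative, not part of the original header): each RAM
   page has one byte in phys_ram_dirty holding the dirty flags above, so
   0xff marks a page dirty for every client:

       if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
           // page touched since the display last read it: redraw it
       }
       cpu_physical_memory_set_dirty(addr);   // after a guest write
*/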
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);
int cpu_physical_memory_get_dirty_tracking(void);

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                    target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}
#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}
#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}
#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    /* note: count is the only asm operand, so it must be %0 */
    __asm__ __volatile__("rdhwr %0, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}
#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif
#endif /* CPU_ALL_H */