/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#ifdef VBOX
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif /* VBOX */
#include "qemu-common.h"
#include "cpu-common.h"
/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif
#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
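
/* Added note: the tswap helpers byte-swap only when host and target
 * endianness differ (BSWAP_NEEDED), so a target-endian value sitting in
 * host memory is read as, e.g.:
 *
 *     uint32_t val = tswap32(*(uint32_t *)p);   // val is in host order
 */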
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
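
/* Added illustration: CPU_DoubleU gives endian-stable access to the two
 * halves of a float64 on any host, e.g.:
 *
 *     CPU_DoubleU u;
 *     u.d = val;          // any float64 value
 *     hi  = u.l.upper;    // always the most significant 32 bits
 *     lo  = u.l.lower;    // always the least significant 32 bits
 */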
#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
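
/* Added examples decoding the scheme above: ldub_raw(p) is an unsigned
 * 8 bit integer load from host memory; ldsw_be_p(p) is a signed 16 bit
 * big endian load through a host pointer; stfq_le_p(p, v) stores a 64 bit
 * float in little endian byte order. */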
#ifdef VBOX
void remAbort(int rc, const char *pszTip) __attribute__((__noreturn__));

void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb);
RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys);
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys);
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys);
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val);
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val);
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val);
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

# ifndef REM_PHYS_ADDR_IN_TLB
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable);
# endif

#endif /* VBOX */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)

DECLINLINE(uint8_t) ldub_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU8((uintptr_t)ptr);
}

DECLINLINE(int8_t) ldsb_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS8((uintptr_t)ptr);
}

DECLINLINE(void) stb_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU8((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) lduw_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU16((uintptr_t)ptr);
}

DECLINLINE(int32_t) ldsw_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS16((uintptr_t)ptr);
}

DECLINLINE(void) stw_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU16((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) ldl_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU32((uintptr_t)ptr);
}

DECLINLINE(void) stl_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU32((uintptr_t)ptr, v);
}

DECLINLINE(void) stq_le_p(void *ptr, uint64_t v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU64((uintptr_t)ptr, v);
}

DECLINLINE(uint64_t) ldq_le_p(const void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU64((uintptr_t)ptr);
}

# undef VBOX_CHECK_ADDR

/* float access */

DECLINLINE(float32) ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

DECLINLINE(void) stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

DECLINLINE(float64) ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t*)ptr + 4);
    return u.d;
}

DECLINLINE(void) stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t*)ptr + 4, u.l.upper);
}
#else /* !VBOX || !REM_PHYS_ADDR_IN_TLB */

static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

#endif /* !VBOX || !REM_PHYS_ADDR_IN_TLB */
#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
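
/* Added note: these wrappers let target-independent code call ldl_p(p),
 * stq_p(p, v), etc. and get the configured target's byte order, selecting
 * the _be_ or _le_ variant at compile time. */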
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern uintptr_t guest_base;
extern int have_guest_base;
extern uintptr_t reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((uintptr_t)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    uintptr_t __guest = (uintptr_t)(x) - GUEST_BASE; \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
})
#endif

#define h2g(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
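
/* Added sketch (user mode only): g2h() turns a guest virtual address into a
 * host pointer by adding GUEST_BASE; h2g() is the inverse and asserts the
 * result fits the target address space:
 *
 *     void *host = g2h(guest_addr);      // guest -> host
 *     abi_ulong back = h2g(host);        // host -> guest, asserts h2g_valid
 */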
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
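
/* Added example (assuming TARGET_PAGE_BITS == 12, as for x86 targets):
 * TARGET_PAGE_SIZE is 0x1000, TARGET_PAGE_MASK is ~0xfff, and
 * TARGET_PAGE_ALIGN(0x1234) rounds up to 0x2000. */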
/* ??? These should be the larger of uintptr_t and target_ulong. */
extern size_t qemu_real_host_page_size;
extern size_t qemu_host_page_bits;
extern size_t qemu_host_page_size;
extern uintptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED  0x0020
#endif
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, uintptr_t);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
#ifndef VBOX
    __attribute__ ((__format__ (__printf__, 2, 3)));
#else  /* VBOX */
    ;
#endif /* VBOX */
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
#define CPU_INTERRUPT_HARD   0x02   /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04   /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08   /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10   /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20   /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40   /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80   /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100  /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI    0x200  /* NMI pending. */
#define CPU_INTERRUPT_INIT   0x400  /* INIT pending. */
#define CPU_INTERRUPT_SIPI   0x800  /* SIPI pending. */
#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */
#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
# define CPU_INTERRUPT_SINGLE_INSTR             0x01000000
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x02000000
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
# define CPU_INTERRUPT_RC                       0x04000000
/** Exit current TB to process an external request. */
# define CPU_INTERRUPT_EXTERNAL_FLUSH_TLB       0x08000000
/** Exit current TB to process an external request. */
# define CPU_INTERRUPT_EXTERNAL_EXIT            0x10000000
/** Exit current TB to process an external interrupt request. */
# define CPU_INTERRUPT_EXTERNAL_HARD            0x20000000
/** Exit current TB to process an external timer request. */
# define CPU_INTERRUPT_EXTERNAL_TIMER           0x40000000
/** Exit current TB to process an external DMA request. */
# define CPU_INTERRUPT_EXTERNAL_DMA             0x80000000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
int cpu_is_stopped(CPUState *env);
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

/* memory API */

#ifndef VBOX
extern int phys_ram_fd;
extern ram_addr_t ram_size;
#endif /* !VBOX */

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    char idstr[256];
    QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;
typedef struct RAMList {
    uint8_t *phys_dirty;
#ifdef VBOX
    /** This is required for bounds checking the phys_ram_dirty accesses.
     * We have memory ranges (the high PC-BIOS mapping) which cause some pages
     * to fall outside the dirty map. */
    RTGCPHYS phys_dirty_size;
# if 1
#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv) \
    do { \
        if (RT_UNLIKELY( ((addr) >> TARGET_PAGE_BITS) >= ram_list.phys_dirty_size)) { \
            Log(("%s: %RGp\n", __FUNCTION__, (RTGCPHYS)addr)); \
            return (rv); \
        } \
    } while (0)
#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr) \
    do { \
        if (RT_UNLIKELY( ((addr) >> TARGET_PAGE_BITS) >= ram_list.phys_dirty_size)) { \
            Log(("%s: %RGp\n", __FUNCTION__, (RTGCPHYS)addr)); \
            return; \
        } \
    } while (0)
# else
#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv) \
    AssertMsgReturn(((addr) >> TARGET_PAGE_BITS) < ram_list.phys_dirty_size, ("%#RGp\n", (RTGCPHYS)(addr)), (rv));
#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr) \
    AssertMsgReturnVoid(((addr) >> TARGET_PAGE_BITS) < ram_list.phys_dirty_size, ("%#RGp\n", (RTGCPHYS)(addr)));
# endif
#else  /* !VBOX */
# define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv)   do { } while (0)
# define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr)     do { } while (0)
#endif /* VBOX */
    QLIST_HEAD(ram, RAMBlock) blocks;
} RAMList;
extern RAMList ram_list;
extern const char *mem_path;
extern int mem_prealloc;
/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY     (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO         (1 << 5)
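
/* Added note: since all three flag bits are zero for a plain RAM mapping,
 * the softmmu fast path can accept a TLB entry with a single compare of the
 * low address bits, falling back to the slow path whenever any of
 * INVALID/NOTDIRTY/MMIO is set. */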
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08
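
/* Added note: each RAM page has one dirty byte in ram_list.phys_dirty, and a
 * page counts as fully dirty at 0xff.  E.g.
 * cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG) tests just the VGA
 * bit, while cpu_physical_memory_set_dirty(addr) marks the page dirty for
 * all clients at once. */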
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff & dirty_flags);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr);
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff);
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(start);
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc);

#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif /* VBOX */

#endif /* CPU_ALL_H */