linux-user: fix segmentation fault passing with h2g(x) != x
[qemu/qmp-unstable.git] / include/exec/cpu-all.h
/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "qemu/thread.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
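
/*
 * Illustrative sketch (not part of the original header): tswap* byte-swaps
 * only when host and target endianness differ.  Assuming a little-endian
 * host emulating a big-endian target (so BSWAP_NEEDED is defined):
 *
 *   uint32_t t = tswap32(0x12345678);    yields 0x78563412
 *   uint32_t u = tswap32(t);             swapping twice restores 0x12345678
 *
 * When host and target endianness match, these calls are plain copies.
 */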

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
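
/*
 * Illustrative decoding of the naming convention above (a sketch added for
 * clarity; the accessors named here are ones defined later in this header):
 *
 *   ldub_raw(p)      load  unsigned byte  (8 bit)   via host memory
 *   ldsw_kernel(p)   load  signed   word  (16 bit)  via soft MMU, kernel mode
 *   stfq_raw(p, v)   store float    quad  (64 bit)  via host memory
 *   ldl_p(p)         load  a 32 bit value in target cpu endianness
 */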

/* target-endianness CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "exec/user/abitypes.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    (abi_ulong)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
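
/*
 * Illustrative round trip (assuming GUEST_BASE == 0x10000; the real value
 * is chosen at runtime):
 *
 *   void *host    = g2h(0x400000);    host address 0x410000
 *   abi_ulong gva = h2g(host);        back to 0x400000 (asserts h2g_valid)
 *
 * With a non-zero guest base h2g(x) != x, so callers must translate
 * explicitly rather than pass host pointers through as guest addresses.
 */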

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)

#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)

#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of uintptr_t and target_ulong.  */
extern uintptr_t qemu_real_host_page_size;
extern uintptr_t qemu_host_page_size;
extern uintptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
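
/*
 * Worked example (assuming 4 KiB target pages, i.e. TARGET_PAGE_BITS == 12):
 *
 *   TARGET_PAGE_SIZE          == 0x1000
 *   TARGET_PAGE_MASK          == ~0xfff
 *   TARGET_PAGE_ALIGN(0x1234) == 0x2000    rounded up to the next page
 *   TARGET_PAGE_ALIGN(0x2000) == 0x2000    already aligned, unchanged
 *
 * HOST_PAGE_ALIGN does the same using the host's page size, which may be
 * larger than the target's.
 */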

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
                                      abi_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

CPUArchState *cpu_copy(CPUArchState *env);

void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0400
#define CPU_INTERRUPT_TGT_INT_2   0x0800
#define CPU_INTERRUPT_TGT_INT_3   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
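
/*
 * Usage sketch (assumed caller code, not part of this header): a debug stub
 * inserting a GDB breakpoint at pc and removing it once it is no longer
 * needed.  cpu_breakpoint_insert() returns 0 on success.
 *
 *   CPUBreakpoint *bp;
 *   if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *       run the cpu until the breakpoint is reported, then:
 *       cpu_breakpoint_remove_by_ref(env, bp);
 *   }
 */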

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use Timers while single stepping */

void cpu_single_step(CPUArchState *env, int enabled);

#if !defined(CONFIG_USER_ONLY)

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);

/* memory API */

extern ram_addr_t ram_size;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK   (1 << 0)

typedef struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    uint32_t flags;
    char idstr[256];
    /* Reads can take either the iothread or the ramlist lock.
     * Writes must take both locks.
     */
    QTAILQ_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
    int fd;
#endif
} RAMBlock;

typedef struct RAMList {
    QemuMutex mutex;
    /* Protected by the iothread lock.  */
    uint8_t *phys_dirty;
    RAMBlock *mru_block;
    /* Protected by the ramlist lock.  */
    QTAILQ_HEAD(, RAMBlock) blocks;
    uint32_t version;
} RAMList;
extern RAMList ram_list;

extern const char *mem_path;
extern int mem_prealloc;

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY       (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO           (1 << 5)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#endif /* CPU_ALL_H */