tcg/tcg.h
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #ifndef TCG_H
26 #define TCG_H
28 #include "qemu-common.h"
29 #include "cpu.h"
30 #include "exec/tb-context.h"
31 #include "qemu/bitops.h"
32 #include "qemu/queue.h"
33 #include "tcg-mo.h"
34 #include "tcg-target.h"
35 #include "qemu/int128.h"
37 /* XXX: make safe guess about sizes */
38 #define MAX_OP_PER_INSTR 266
40 #if HOST_LONG_BITS == 32
41 #define MAX_OPC_PARAM_PER_ARG 2
42 #else
43 #define MAX_OPC_PARAM_PER_ARG 1
44 #endif
45 #define MAX_OPC_PARAM_IARGS 6
46 #define MAX_OPC_PARAM_OARGS 1
47 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
49 /* A Call op needs up to 4 + 2N parameters on 32-bit archs,
50 * and up to 4 + N parameters on 64-bit archs
51 * (N = number of input arguments + output arguments). */
52 #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
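/*
 * Editorial illustration (worked numbers, not part of the original header):
 * on a 64-bit host MAX_OPC_PARAM_PER_ARG is 1 and MAX_OPC_PARAM_ARGS is
 * 6 + 1 = 7, so MAX_OPC_PARAM = 4 + 1 * 7 = 11; on a 32-bit host each
 * argument may need two words, giving MAX_OPC_PARAM = 4 + 2 * 7 = 18.
 */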
54 #define CPU_TEMP_BUF_NLONGS 128
56 /* Default target word size to pointer size. */
57 #ifndef TCG_TARGET_REG_BITS
58 # if UINTPTR_MAX == UINT32_MAX
59 # define TCG_TARGET_REG_BITS 32
60 # elif UINTPTR_MAX == UINT64_MAX
61 # define TCG_TARGET_REG_BITS 64
62 # else
63 # error Unknown pointer size for tcg target
64 # endif
65 #endif
67 #if TCG_TARGET_REG_BITS == 32
68 typedef int32_t tcg_target_long;
69 typedef uint32_t tcg_target_ulong;
70 #define TCG_PRIlx PRIx32
71 #define TCG_PRIld PRId32
72 #elif TCG_TARGET_REG_BITS == 64
73 typedef int64_t tcg_target_long;
74 typedef uint64_t tcg_target_ulong;
75 #define TCG_PRIlx PRIx64
76 #define TCG_PRIld PRId64
77 #else
78 #error unsupported
79 #endif
81 /* Oversized TCG guests make things like MTTCG hard
82 * as we can't use atomics for cputlb updates.
83 */
84 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
85 #define TCG_OVERSIZED_GUEST 1
86 #else
87 #define TCG_OVERSIZED_GUEST 0
88 #endif
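/*
 * Editorial illustration: TCG_OVERSIZED_GUEST is 1 for, e.g., a 64-bit guest
 * (TARGET_LONG_BITS == 64) emulated on a 32-bit host (TCG_TARGET_REG_BITS == 32),
 * where a guest-sized TLB entry cannot be updated with a single host atomic.
 */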
90 #if TCG_TARGET_NB_REGS <= 32
91 typedef uint32_t TCGRegSet;
92 #elif TCG_TARGET_NB_REGS <= 64
93 typedef uint64_t TCGRegSet;
94 #else
95 #error unsupported
96 #endif
98 #if TCG_TARGET_REG_BITS == 32
99 /* Turn some undef macros into false macros. */
100 #define TCG_TARGET_HAS_extrl_i64_i32 0
101 #define TCG_TARGET_HAS_extrh_i64_i32 0
102 #define TCG_TARGET_HAS_div_i64 0
103 #define TCG_TARGET_HAS_rem_i64 0
104 #define TCG_TARGET_HAS_div2_i64 0
105 #define TCG_TARGET_HAS_rot_i64 0
106 #define TCG_TARGET_HAS_ext8s_i64 0
107 #define TCG_TARGET_HAS_ext16s_i64 0
108 #define TCG_TARGET_HAS_ext32s_i64 0
109 #define TCG_TARGET_HAS_ext8u_i64 0
110 #define TCG_TARGET_HAS_ext16u_i64 0
111 #define TCG_TARGET_HAS_ext32u_i64 0
112 #define TCG_TARGET_HAS_bswap16_i64 0
113 #define TCG_TARGET_HAS_bswap32_i64 0
114 #define TCG_TARGET_HAS_bswap64_i64 0
115 #define TCG_TARGET_HAS_neg_i64 0
116 #define TCG_TARGET_HAS_not_i64 0
117 #define TCG_TARGET_HAS_andc_i64 0
118 #define TCG_TARGET_HAS_orc_i64 0
119 #define TCG_TARGET_HAS_eqv_i64 0
120 #define TCG_TARGET_HAS_nand_i64 0
121 #define TCG_TARGET_HAS_nor_i64 0
122 #define TCG_TARGET_HAS_clz_i64 0
123 #define TCG_TARGET_HAS_ctz_i64 0
124 #define TCG_TARGET_HAS_ctpop_i64 0
125 #define TCG_TARGET_HAS_deposit_i64 0
126 #define TCG_TARGET_HAS_extract_i64 0
127 #define TCG_TARGET_HAS_sextract_i64 0
128 #define TCG_TARGET_HAS_extract2_i64 0
129 #define TCG_TARGET_HAS_movcond_i64 0
130 #define TCG_TARGET_HAS_add2_i64 0
131 #define TCG_TARGET_HAS_sub2_i64 0
132 #define TCG_TARGET_HAS_mulu2_i64 0
133 #define TCG_TARGET_HAS_muls2_i64 0
134 #define TCG_TARGET_HAS_muluh_i64 0
135 #define TCG_TARGET_HAS_mulsh_i64 0
136 /* Turn some undef macros into true macros. */
137 #define TCG_TARGET_HAS_add2_i32 1
138 #define TCG_TARGET_HAS_sub2_i32 1
139 #endif
141 #ifndef TCG_TARGET_deposit_i32_valid
142 #define TCG_TARGET_deposit_i32_valid(ofs, len) 1
143 #endif
144 #ifndef TCG_TARGET_deposit_i64_valid
145 #define TCG_TARGET_deposit_i64_valid(ofs, len) 1
146 #endif
147 #ifndef TCG_TARGET_extract_i32_valid
148 #define TCG_TARGET_extract_i32_valid(ofs, len) 1
149 #endif
150 #ifndef TCG_TARGET_extract_i64_valid
151 #define TCG_TARGET_extract_i64_valid(ofs, len) 1
152 #endif
154 /* Only one of DIV or DIV2 should be defined. */
155 #if defined(TCG_TARGET_HAS_div_i32)
156 #define TCG_TARGET_HAS_div2_i32 0
157 #elif defined(TCG_TARGET_HAS_div2_i32)
158 #define TCG_TARGET_HAS_div_i32 0
159 #define TCG_TARGET_HAS_rem_i32 0
160 #endif
161 #if defined(TCG_TARGET_HAS_div_i64)
162 #define TCG_TARGET_HAS_div2_i64 0
163 #elif defined(TCG_TARGET_HAS_div2_i64)
164 #define TCG_TARGET_HAS_div_i64 0
165 #define TCG_TARGET_HAS_rem_i64 0
166 #endif
168 /* For 32-bit targets, some sort of unsigned widening multiply is required. */
169 #if TCG_TARGET_REG_BITS == 32 \
170 && !(defined(TCG_TARGET_HAS_mulu2_i32) \
171 || defined(TCG_TARGET_HAS_muluh_i32))
172 # error "Missing unsigned widening multiply"
173 #endif
175 #if !defined(TCG_TARGET_HAS_v64) \
176 && !defined(TCG_TARGET_HAS_v128) \
177 && !defined(TCG_TARGET_HAS_v256)
178 #define TCG_TARGET_MAYBE_vec 0
179 #define TCG_TARGET_HAS_abs_vec 0
180 #define TCG_TARGET_HAS_neg_vec 0
181 #define TCG_TARGET_HAS_not_vec 0
182 #define TCG_TARGET_HAS_andc_vec 0
183 #define TCG_TARGET_HAS_orc_vec 0
184 #define TCG_TARGET_HAS_shi_vec 0
185 #define TCG_TARGET_HAS_shs_vec 0
186 #define TCG_TARGET_HAS_shv_vec 0
187 #define TCG_TARGET_HAS_mul_vec 0
188 #define TCG_TARGET_HAS_sat_vec 0
189 #define TCG_TARGET_HAS_minmax_vec 0
190 #else
191 #define TCG_TARGET_MAYBE_vec 1
192 #endif
193 #ifndef TCG_TARGET_HAS_v64
194 #define TCG_TARGET_HAS_v64 0
195 #endif
196 #ifndef TCG_TARGET_HAS_v128
197 #define TCG_TARGET_HAS_v128 0
198 #endif
199 #ifndef TCG_TARGET_HAS_v256
200 #define TCG_TARGET_HAS_v256 0
201 #endif
203 #ifndef TARGET_INSN_START_EXTRA_WORDS
204 # define TARGET_INSN_START_WORDS 1
205 #else
206 # define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
207 #endif
209 typedef enum TCGOpcode {
210 #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
211 #include "tcg-opc.h"
212 #undef DEF
213 NB_OPS,
214 } TCGOpcode;
216 #define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r))
217 #define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
218 #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
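/*
 * Editorial sketch: a TCGRegSet is a plain bitmask indexed by host register
 * number. Using a hypothetical backend register enumerator TCG_REG_R3:
 *
 *   TCGRegSet set = 0;
 *   tcg_regset_set_reg(set, TCG_REG_R3);        // its bit is now set
 *   if (tcg_regset_test_reg(set, TCG_REG_R3)) { // -> 1
 *       tcg_regset_reset_reg(set, TCG_REG_R3);  // bit cleared again
 *   }
 */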
220 #ifndef TCG_TARGET_INSN_UNIT_SIZE
221 # error "Missing TCG_TARGET_INSN_UNIT_SIZE"
222 #elif TCG_TARGET_INSN_UNIT_SIZE == 1
223 typedef uint8_t tcg_insn_unit;
224 #elif TCG_TARGET_INSN_UNIT_SIZE == 2
225 typedef uint16_t tcg_insn_unit;
226 #elif TCG_TARGET_INSN_UNIT_SIZE == 4
227 typedef uint32_t tcg_insn_unit;
228 #elif TCG_TARGET_INSN_UNIT_SIZE == 8
229 typedef uint64_t tcg_insn_unit;
230 #else
231 /* The port better have done this. */
232 #endif
235 #if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
236 # define tcg_debug_assert(X) do { assert(X); } while (0)
237 #else
238 # define tcg_debug_assert(X) \
239 do { if (!(X)) { __builtin_unreachable(); } } while (0)
240 #endif
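/*
 * Editorial note: with CONFIG_DEBUG_TCG the condition is checked at run time;
 * otherwise it only becomes an optimisation hint, e.g.
 *   tcg_debug_assert(idx <= 15);
 * (as in make_memop_idx() below) compiles to nothing but still lets the
 * compiler assume idx <= 15 in the code that follows.
 */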
242 typedef struct TCGRelocation TCGRelocation;
243 struct TCGRelocation {
244 QSIMPLEQ_ENTRY(TCGRelocation) next;
245 tcg_insn_unit *ptr;
246 intptr_t addend;
247 int type;
248 };
250 typedef struct TCGLabel TCGLabel;
251 struct TCGLabel {
252 unsigned present : 1;
253 unsigned has_value : 1;
254 unsigned id : 14;
255 unsigned refs : 16;
256 union {
257 uintptr_t value;
258 tcg_insn_unit *value_ptr;
259 } u;
260 QSIMPLEQ_HEAD(, TCGRelocation) relocs;
261 QSIMPLEQ_ENTRY(TCGLabel) next;
262 };
264 typedef struct TCGPool {
265 struct TCGPool *next;
266 int size;
267 uint8_t data[0] __attribute__ ((aligned));
268 } TCGPool;
270 #define TCG_POOL_CHUNK_SIZE 32768
272 #define TCG_MAX_TEMPS 512
273 #define TCG_MAX_INSNS 512
275 /* when the size of the arguments of a called function is smaller than
276 this value, they are statically allocated in the TB stack frame */
277 #define TCG_STATIC_CALL_ARGS_SIZE 128
279 typedef enum TCGType {
280 TCG_TYPE_I32,
281 TCG_TYPE_I64,
283 TCG_TYPE_V64,
284 TCG_TYPE_V128,
285 TCG_TYPE_V256,
287 TCG_TYPE_COUNT, /* number of different types */
289 /* An alias for the size of the host register. */
290 #if TCG_TARGET_REG_BITS == 32
291 TCG_TYPE_REG = TCG_TYPE_I32,
292 #else
293 TCG_TYPE_REG = TCG_TYPE_I64,
294 #endif
296 /* An alias for the size of the native pointer. */
297 #if UINTPTR_MAX == UINT32_MAX
298 TCG_TYPE_PTR = TCG_TYPE_I32,
299 #else
300 TCG_TYPE_PTR = TCG_TYPE_I64,
301 #endif
303 /* An alias for the size of the target "long", aka register. */
304 #if TARGET_LONG_BITS == 64
305 TCG_TYPE_TL = TCG_TYPE_I64,
306 #else
307 TCG_TYPE_TL = TCG_TYPE_I32,
308 #endif
309 } TCGType;
311 /* Constants for qemu_ld and qemu_st for the Memory Operation field. */
312 typedef enum TCGMemOp {
313 MO_8 = 0,
314 MO_16 = 1,
315 MO_32 = 2,
316 MO_64 = 3,
317 MO_SIZE = 3, /* Mask for the above. */
319 MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
321 MO_BSWAP = 8, /* Host reverse endian. */
322 #ifdef HOST_WORDS_BIGENDIAN
323 MO_LE = MO_BSWAP,
324 MO_BE = 0,
325 #else
326 MO_LE = 0,
327 MO_BE = MO_BSWAP,
328 #endif
329 #ifdef TARGET_WORDS_BIGENDIAN
330 MO_TE = MO_BE,
331 #else
332 MO_TE = MO_LE,
333 #endif
335 /* MO_UNALN accesses are never checked for alignment.
336 * MO_ALIGN accesses will result in a call to the CPU's
337 * do_unaligned_access hook if the guest address is not aligned.
338 * The default depends on whether the target CPU defines ALIGNED_ONLY.
340 * Some architectures (e.g. ARMv8) require an address that is aligned
341 * to a size larger than the size of the memory access.
342 * Some architectures (e.g. SPARCv9) require an address that is aligned,
343 * but less strictly than the natural alignment.
345 * MO_ALIGN supposes the alignment size is the size of a memory access.
347 * There are three options:
348 * - unaligned access permitted (MO_UNALN).
349 * - an alignment to the size of an access (MO_ALIGN);
350 * - an alignment to a specified size, which may be more or less than
351 * the access size (MO_ALIGN_x where 'x' is a size in bytes);
352 */
353 MO_ASHIFT = 4,
354 MO_AMASK = 7 << MO_ASHIFT,
355 #ifdef ALIGNED_ONLY
356 MO_ALIGN = 0,
357 MO_UNALN = MO_AMASK,
358 #else
359 MO_ALIGN = MO_AMASK,
360 MO_UNALN = 0,
361 #endif
362 MO_ALIGN_2 = 1 << MO_ASHIFT,
363 MO_ALIGN_4 = 2 << MO_ASHIFT,
364 MO_ALIGN_8 = 3 << MO_ASHIFT,
365 MO_ALIGN_16 = 4 << MO_ASHIFT,
366 MO_ALIGN_32 = 5 << MO_ASHIFT,
367 MO_ALIGN_64 = 6 << MO_ASHIFT,
369 /* Combinations of the above, for ease of use. */
370 MO_UB = MO_8,
371 MO_UW = MO_16,
372 MO_UL = MO_32,
373 MO_SB = MO_SIGN | MO_8,
374 MO_SW = MO_SIGN | MO_16,
375 MO_SL = MO_SIGN | MO_32,
376 MO_Q = MO_64,
378 MO_LEUW = MO_LE | MO_UW,
379 MO_LEUL = MO_LE | MO_UL,
380 MO_LESW = MO_LE | MO_SW,
381 MO_LESL = MO_LE | MO_SL,
382 MO_LEQ = MO_LE | MO_Q,
384 MO_BEUW = MO_BE | MO_UW,
385 MO_BEUL = MO_BE | MO_UL,
386 MO_BESW = MO_BE | MO_SW,
387 MO_BESL = MO_BE | MO_SL,
388 MO_BEQ = MO_BE | MO_Q,
390 MO_TEUW = MO_TE | MO_UW,
391 MO_TEUL = MO_TE | MO_UL,
392 MO_TESW = MO_TE | MO_SW,
393 MO_TESL = MO_TE | MO_SL,
394 MO_TEQ = MO_TE | MO_Q,
396 MO_SSIZE = MO_SIZE | MO_SIGN,
397 } TCGMemOp;
399 /**
400 * get_alignment_bits
401 * @memop: TCGMemOp value
403 * Extract the alignment size from the memop.
404 */
405 static inline unsigned get_alignment_bits(TCGMemOp memop)
406 {
407 unsigned a = memop & MO_AMASK;
409 if (a == MO_UNALN) {
410 /* No alignment required. */
411 a = 0;
412 } else if (a == MO_ALIGN) {
413 /* A natural alignment requirement. */
414 a = memop & MO_SIZE;
415 } else {
416 /* A specific alignment requirement. */
417 a = a >> MO_ASHIFT;
418 }
419 #if defined(CONFIG_SOFTMMU)
420 /* The requested alignment cannot overlap the TLB flags. */
421 tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
422 #endif
423 return a;
424 }
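/*
 * Editorial illustration (assuming ALIGNED_ONLY is not defined, so MO_UNALN
 * is 0): the returned value is log2 of the required alignment in bytes.
 *
 *   get_alignment_bits(MO_UB)                 -> 0  (no alignment required)
 *   get_alignment_bits(MO_TEUL | MO_ALIGN)    -> 2  (natural 4-byte alignment)
 *   get_alignment_bits(MO_TEQ | MO_ALIGN_16)  -> 4  (explicit 16-byte alignment)
 */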
426 typedef tcg_target_ulong TCGArg;
428 /* Define type and accessor macros for TCG variables.
430 TCG variables are the inputs and outputs of TCG ops, as described
431 in tcg/README. Target CPU front-end code uses these types to deal
432 with TCG variables as it emits TCG code via the tcg_gen_* functions.
433 They come in several flavours:
434 * TCGv_i32 : 32 bit integer type
435 * TCGv_i64 : 64 bit integer type
436 * TCGv_ptr : a host pointer type
437 * TCGv_vec : a host vector type; the exact size is not exposed
438 to the CPU front-end code.
439 * TCGv : an integer type the same size as target_ulong
440 (an alias for either TCGv_i32 or TCGv_i64)
441 The compiler's type checking will complain if you mix them
442 up and pass the wrong sized TCGv to a function.
444 Users of tcg_gen_* don't need to know about any of the internal
445 details of these, and should treat them as opaque types.
446 You won't be able to look inside them in a debugger either.
448 Internal implementation details follow:
450 Note that there is no definition of the structs TCGv_i32_d etc anywhere.
451 This is deliberate, because the values we store in variables of type
452 TCGv_i32 are not really pointers-to-structures. They're just small
453 integers, but keeping them in pointer types like this means that the
454 compiler will complain if you accidentally pass a TCGv_i32 to a
455 function which takes a TCGv_i64, and so on. Only the internals of
456 TCG need to care about the actual contents of the types. */
458 typedef struct TCGv_i32_d *TCGv_i32;
459 typedef struct TCGv_i64_d *TCGv_i64;
460 typedef struct TCGv_ptr_d *TCGv_ptr;
461 typedef struct TCGv_vec_d *TCGv_vec;
462 typedef TCGv_ptr TCGv_env;
463 #if TARGET_LONG_BITS == 32
464 #define TCGv TCGv_i32
465 #elif TARGET_LONG_BITS == 64
466 #define TCGv TCGv_i64
467 #else
468 #error Unhandled TARGET_LONG_BITS value
469 #endif
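/*
 * Editorial sketch: mixing the flavours is caught by the compiler's type
 * checking, e.g. (tcg_temp_new_*() are declared later in this header,
 * tcg_gen_mov_i32() in tcg-op.h):
 *
 *   TCGv_i32 lo   = tcg_temp_new_i32();
 *   TCGv_i64 wide = tcg_temp_new_i64();
 *   tcg_gen_mov_i32(lo, wide);   // compile error: TCGv_i64 is not TCGv_i32
 */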
471 /* call flags */
472 /* Helper does not read globals (either directly or through an exception). It
473 implies TCG_CALL_NO_WRITE_GLOBALS. */
474 #define TCG_CALL_NO_READ_GLOBALS 0x0001
475 /* Helper does not write globals */
476 #define TCG_CALL_NO_WRITE_GLOBALS 0x0002
477 /* Helper can be safely suppressed if the return value is not used. */
478 #define TCG_CALL_NO_SIDE_EFFECTS 0x0004
479 /* Helper is QEMU_NORETURN. */
480 #define TCG_CALL_NO_RETURN 0x0008
482 /* convenience version of most used call flags */
483 #define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS
484 #define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS
485 #define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS
486 #define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
487 #define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
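/*
 * Editorial illustration: a pure helper that only computes a value from its
 * arguments can be declared with the combined flags, roughly as tcg-runtime.h
 * does for clz_i32:
 *
 *   DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 *
 * which allows the call to be removed entirely when its result is unused.
 */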
489 /* Used to align parameters. See the comment before tcgv_i32_temp. */
490 #define TCG_CALL_DUMMY_ARG ((TCGArg)0)
492 /* Conditions. Note that these are laid out for easy manipulation by
493 the functions below:
494 bit 0 is used for inverting;
495 bit 1 is signed,
496 bit 2 is unsigned,
497 bit 3 is used with bit 0 for swapping signed/unsigned. */
498 typedef enum {
499 /* non-signed */
500 TCG_COND_NEVER = 0 | 0 | 0 | 0,
501 TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
502 TCG_COND_EQ = 8 | 0 | 0 | 0,
503 TCG_COND_NE = 8 | 0 | 0 | 1,
504 /* signed */
505 TCG_COND_LT = 0 | 0 | 2 | 0,
506 TCG_COND_GE = 0 | 0 | 2 | 1,
507 TCG_COND_LE = 8 | 0 | 2 | 0,
508 TCG_COND_GT = 8 | 0 | 2 | 1,
509 /* unsigned */
510 TCG_COND_LTU = 0 | 4 | 0 | 0,
511 TCG_COND_GEU = 0 | 4 | 0 | 1,
512 TCG_COND_LEU = 8 | 4 | 0 | 0,
513 TCG_COND_GTU = 8 | 4 | 0 | 1,
514 } TCGCond;
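/*
 * Editorial illustration of the encoding, using the helpers defined below:
 *   TCG_COND_LT  = 2  (signed, not inverted)
 *   tcg_invert_cond(TCG_COND_LT)   -> TCG_COND_GE  (bit 0 flipped)
 *   tcg_swap_cond(TCG_COND_LT)     -> TCG_COND_GT  (operands exchanged)
 *   tcg_unsigned_cond(TCG_COND_LT) -> TCG_COND_LTU (signed bit -> unsigned bit)
 */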
516 /* Invert the sense of the comparison. */
517 static inline TCGCond tcg_invert_cond(TCGCond c)
519 return (TCGCond)(c ^ 1);
522 /* Swap the operands in a comparison. */
523 static inline TCGCond tcg_swap_cond(TCGCond c)
525 return c & 6 ? (TCGCond)(c ^ 9) : c;
528 /* Create an "unsigned" version of a "signed" comparison. */
529 static inline TCGCond tcg_unsigned_cond(TCGCond c)
531 return c & 2 ? (TCGCond)(c ^ 6) : c;
534 /* Create a "signed" version of an "unsigned" comparison. */
535 static inline TCGCond tcg_signed_cond(TCGCond c)
537 return c & 4 ? (TCGCond)(c ^ 6) : c;
540 /* Must a comparison be considered unsigned? */
541 static inline bool is_unsigned_cond(TCGCond c)
543 return (c & 4) != 0;
546 /* Create a "high" version of a double-word comparison.
547 This removes equality from a LTE or GTE comparison. */
548 static inline TCGCond tcg_high_cond(TCGCond c)
550 switch (c) {
551 case TCG_COND_GE:
552 case TCG_COND_LE:
553 case TCG_COND_GEU:
554 case TCG_COND_LEU:
555 return (TCGCond)(c ^ 8);
556 default:
557 return c;
561 typedef enum TCGTempVal {
562 TEMP_VAL_DEAD,
563 TEMP_VAL_REG,
564 TEMP_VAL_MEM,
565 TEMP_VAL_CONST,
566 } TCGTempVal;
568 typedef struct TCGTemp {
569 TCGReg reg:8;
570 TCGTempVal val_type:8;
571 TCGType base_type:8;
572 TCGType type:8;
573 unsigned int fixed_reg:1;
574 unsigned int indirect_reg:1;
575 unsigned int indirect_base:1;
576 unsigned int mem_coherent:1;
577 unsigned int mem_allocated:1;
578 /* If true, the temp is saved across both basic blocks and
579 translation blocks. */
580 unsigned int temp_global:1;
581 /* If true, the temp is saved across basic blocks but dead
582 at the end of translation blocks. If false, the temp is
583 dead at the end of basic blocks. */
584 unsigned int temp_local:1;
585 unsigned int temp_allocated:1;
587 tcg_target_long val;
588 struct TCGTemp *mem_base;
589 intptr_t mem_offset;
590 const char *name;
592 /* Pass-specific information that can be stored for a temporary.
593 One word worth of integer data, and one pointer to data
594 allocated separately. */
595 uintptr_t state;
596 void *state_ptr;
597 } TCGTemp;
599 typedef struct TCGContext TCGContext;
601 typedef struct TCGTempSet {
602 unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
603 } TCGTempSet;
605 /* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
606 this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
607 There are never more than 2 outputs, which means that we can store all
608 dead + sync data within 16 bits. */
609 #define DEAD_ARG 4
610 #define SYNC_ARG 1
611 typedef uint16_t TCGLifeData;
613 /* The layout here is designed to avoid a bitfield crossing of
614 a 32-bit boundary, which would cause GCC to add extra padding. */
615 typedef struct TCGOp {
616 TCGOpcode opc : 8; /* 8 */
618 /* Parameters for this opcode. See below. */
619 unsigned param1 : 4; /* 12 */
620 unsigned param2 : 4; /* 16 */
622 /* Lifetime data of the operands. */
623 unsigned life : 16; /* 32 */
625 /* Next and previous opcodes. */
626 QTAILQ_ENTRY(TCGOp) link;
628 /* Arguments for the opcode. */
629 TCGArg args[MAX_OPC_PARAM];
631 /* Register preferences for the output(s). */
632 TCGRegSet output_pref[2];
633 } TCGOp;
635 #define TCGOP_CALLI(X) (X)->param1
636 #define TCGOP_CALLO(X) (X)->param2
638 #define TCGOP_VECL(X) (X)->param1
639 #define TCGOP_VECE(X) (X)->param2
641 /* Make sure operands fit in the bitfields above. */
642 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
644 typedef struct TCGProfile {
645 int64_t cpu_exec_time;
646 int64_t tb_count1;
647 int64_t tb_count;
648 int64_t op_count; /* total insn count */
649 int op_count_max; /* max insn per TB */
650 int temp_count_max;
651 int64_t temp_count;
652 int64_t del_op_count;
653 int64_t code_in_len;
654 int64_t code_out_len;
655 int64_t search_out_len;
656 int64_t interm_time;
657 int64_t code_time;
658 int64_t la_time;
659 int64_t opt_time;
660 int64_t restore_count;
661 int64_t restore_time;
662 int64_t table_op_count[NB_OPS];
663 } TCGProfile;
665 struct TCGContext {
666 uint8_t *pool_cur, *pool_end;
667 TCGPool *pool_first, *pool_current, *pool_first_large;
668 int nb_labels;
669 int nb_globals;
670 int nb_temps;
671 int nb_indirects;
672 int nb_ops;
674 /* goto_tb support */
675 tcg_insn_unit *code_buf;
676 uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
677 uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
678 uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
680 TCGRegSet reserved_regs;
681 uint32_t tb_cflags; /* cflags of the current TB */
682 intptr_t current_frame_offset;
683 intptr_t frame_start;
684 intptr_t frame_end;
685 TCGTemp *frame_temp;
687 tcg_insn_unit *code_ptr;
689 #ifdef CONFIG_PROFILER
690 TCGProfile prof;
691 #endif
693 #ifdef CONFIG_DEBUG_TCG
694 int temps_in_use;
695 int goto_tb_issue_mask;
696 const TCGOpcode *vecop_list;
697 #endif
699 /* Code generation. Note that we specifically do not use tcg_insn_unit
700 here, because there's too much arithmetic throughout that relies
701 on addition and subtraction working on bytes. Rely on the GCC
702 extension that allows arithmetic on void*. */
703 void *code_gen_prologue;
704 void *code_gen_epilogue;
705 void *code_gen_buffer;
706 size_t code_gen_buffer_size;
707 void *code_gen_ptr;
708 void *data_gen_ptr;
710 /* Threshold to flush the translated code buffer. */
711 void *code_gen_highwater;
713 size_t tb_phys_invalidate_count;
715 /* Track which vCPU triggers events */
716 CPUState *cpu; /* *_trans */
718 /* These structures are private to tcg-target.inc.c. */
719 #ifdef TCG_TARGET_NEED_LDST_LABELS
720 QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
721 #endif
722 #ifdef TCG_TARGET_NEED_POOL_LABELS
723 struct TCGLabelPoolData *pool_labels;
724 #endif
726 TCGLabel *exitreq_label;
728 TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
729 TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
731 QTAILQ_HEAD(, TCGOp) ops, free_ops;
732 QSIMPLEQ_HEAD(, TCGLabel) labels;
734 /* Tells which temporary holds a given register.
735 It does not take into account fixed registers */
736 TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
738 uint16_t gen_insn_end_off[TCG_MAX_INSNS];
739 target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
742 extern TCGContext tcg_init_ctx;
743 extern __thread TCGContext *tcg_ctx;
744 extern TCGv_env cpu_env;
746 static inline size_t temp_idx(TCGTemp *ts)
748 ptrdiff_t n = ts - tcg_ctx->temps;
749 tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
750 return n;
753 static inline TCGArg temp_arg(TCGTemp *ts)
755 return (uintptr_t)ts;
758 static inline TCGTemp *arg_temp(TCGArg a)
760 return (TCGTemp *)(uintptr_t)a;
763 /* Using the offset of a temporary, relative to TCGContext, rather than
764 its index means that we don't use 0. That leaves offset 0 free for
765 a NULL representation without having to leave index 0 unused. */
766 static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
768 uintptr_t o = (uintptr_t)v;
769 TCGTemp *t = (void *)tcg_ctx + o;
770 tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
771 return t;
774 static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
776 return tcgv_i32_temp((TCGv_i32)v);
779 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
781 return tcgv_i32_temp((TCGv_i32)v);
784 static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
786 return tcgv_i32_temp((TCGv_i32)v);
789 static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
791 return temp_arg(tcgv_i32_temp(v));
794 static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
796 return temp_arg(tcgv_i64_temp(v));
799 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
801 return temp_arg(tcgv_ptr_temp(v));
804 static inline TCGArg tcgv_vec_arg(TCGv_vec v)
806 return temp_arg(tcgv_vec_temp(v));
809 static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
811 (void)temp_idx(t); /* trigger embedded assert */
812 return (TCGv_i32)((void *)t - (void *)tcg_ctx);
815 static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
817 return (TCGv_i64)temp_tcgv_i32(t);
820 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
822 return (TCGv_ptr)temp_tcgv_i32(t);
825 static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
827 return (TCGv_vec)temp_tcgv_i32(t);
830 #if TCG_TARGET_REG_BITS == 32
831 static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
833 return temp_tcgv_i32(tcgv_i64_temp(t));
836 static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
838 return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
840 #endif
842 static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
844 op->args[arg] = v;
847 static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
849 #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
850 tcg_set_insn_param(op, arg, v);
851 #else
852 tcg_set_insn_param(op, arg * 2, v);
853 tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
854 #endif
857 /* The last op that was emitted. */
858 static inline TCGOp *tcg_last_op(void)
860 return QTAILQ_LAST(&tcg_ctx->ops);
863 /* Test for whether to terminate the TB for using too many opcodes. */
864 static inline bool tcg_op_buf_full(void)
866 /* This is not a hard limit, it merely stops translation when
867 * we have produced "enough" opcodes. We want to limit TB size
868 * such that a RISC host can reasonably use a 16-bit signed
869 * branch within the TB. We also need to be mindful of the
870 * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
871 * and TCGContext.gen_insn_end_off[].
872 */
873 return tcg_ctx->nb_ops >= 4000;
876 /* pool based memory allocation */
878 /* user-mode: mmap_lock must be held for tcg_malloc_internal. */
879 void *tcg_malloc_internal(TCGContext *s, int size);
880 void tcg_pool_reset(TCGContext *s);
881 TranslationBlock *tcg_tb_alloc(TCGContext *s);
883 void tcg_region_init(void);
884 void tcg_region_reset_all(void);
886 size_t tcg_code_size(void);
887 size_t tcg_code_capacity(void);
889 void tcg_tb_insert(TranslationBlock *tb);
890 void tcg_tb_remove(TranslationBlock *tb);
891 size_t tcg_tb_phys_invalidate_count(void);
892 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
893 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
894 size_t tcg_nb_tbs(void);
896 /* user-mode: Called with mmap_lock held. */
897 static inline void *tcg_malloc(int size)
899 TCGContext *s = tcg_ctx;
900 uint8_t *ptr, *ptr_end;
902 /* ??? This is a weak placeholder for minimum malloc alignment. */
903 size = QEMU_ALIGN_UP(size, 8);
905 ptr = s->pool_cur;
906 ptr_end = ptr + size;
907 if (unlikely(ptr_end > s->pool_end)) {
908 return tcg_malloc_internal(tcg_ctx, size);
909 } else {
910 s->pool_cur = ptr_end;
911 return ptr;
915 void tcg_context_init(TCGContext *s);
916 void tcg_register_thread(void);
917 void tcg_prologue_init(TCGContext *s);
918 void tcg_func_start(TCGContext *s);
920 int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
922 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
924 TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
925 intptr_t, const char *);
926 TCGTemp *tcg_temp_new_internal(TCGType, bool);
927 void tcg_temp_free_internal(TCGTemp *);
928 TCGv_vec tcg_temp_new_vec(TCGType type);
929 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
931 static inline void tcg_temp_free_i32(TCGv_i32 arg)
933 tcg_temp_free_internal(tcgv_i32_temp(arg));
936 static inline void tcg_temp_free_i64(TCGv_i64 arg)
938 tcg_temp_free_internal(tcgv_i64_temp(arg));
941 static inline void tcg_temp_free_ptr(TCGv_ptr arg)
943 tcg_temp_free_internal(tcgv_ptr_temp(arg));
946 static inline void tcg_temp_free_vec(TCGv_vec arg)
948 tcg_temp_free_internal(tcgv_vec_temp(arg));
951 static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
952 const char *name)
954 TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
955 return temp_tcgv_i32(t);
958 static inline TCGv_i32 tcg_temp_new_i32(void)
960 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
961 return temp_tcgv_i32(t);
964 static inline TCGv_i32 tcg_temp_local_new_i32(void)
966 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
967 return temp_tcgv_i32(t);
970 static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
971 const char *name)
973 TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
974 return temp_tcgv_i64(t);
977 static inline TCGv_i64 tcg_temp_new_i64(void)
979 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
980 return temp_tcgv_i64(t);
983 static inline TCGv_i64 tcg_temp_local_new_i64(void)
985 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
986 return temp_tcgv_i64(t);
989 static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
990 const char *name)
992 TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
993 return temp_tcgv_ptr(t);
996 static inline TCGv_ptr tcg_temp_new_ptr(void)
998 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
999 return temp_tcgv_ptr(t);
1002 static inline TCGv_ptr tcg_temp_local_new_ptr(void)
1004 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
1005 return temp_tcgv_ptr(t);
1008 #if defined(CONFIG_DEBUG_TCG)
1009 /* If you call tcg_clear_temp_count() at the start of a section of
1010 * code which is not supposed to leak any TCG temporaries, then
1011 * calling tcg_check_temp_count() at the end of the section will
1012 * return 1 if the section did in fact leak a temporary.
1013 */
1014 void tcg_clear_temp_count(void);
1015 int tcg_check_temp_count(void);
1016 #else
1017 #define tcg_clear_temp_count() do { } while (0)
1018 #define tcg_check_temp_count() 0
1019 #endif
1021 int64_t tcg_cpu_exec_time(void);
1022 void tcg_dump_info(void);
1023 void tcg_dump_op_count(void);
1025 #define TCG_CT_ALIAS 0x80
1026 #define TCG_CT_IALIAS 0x40
1027 #define TCG_CT_NEWREG 0x20 /* output requires a new register */
1028 #define TCG_CT_REG 0x01
1029 #define TCG_CT_CONST 0x02 /* any constant of register size */
1031 typedef struct TCGArgConstraint {
1032 uint16_t ct;
1033 uint8_t alias_index;
1034 union {
1035 TCGRegSet regs;
1036 } u;
1037 } TCGArgConstraint;
1039 #define TCG_MAX_OP_ARGS 16
1041 /* Bits for TCGOpDef->flags, 8 bits available. */
1042 enum {
1043 /* Instruction exits the translation block. */
1044 TCG_OPF_BB_EXIT = 0x01,
1045 /* Instruction defines the end of a basic block. */
1046 TCG_OPF_BB_END = 0x02,
1047 /* Instruction clobbers call registers and potentially updates globals. */
1048 TCG_OPF_CALL_CLOBBER = 0x04,
1049 /* Instruction has side effects: it cannot be removed if its outputs
1050 are not used, and might trigger exceptions. */
1051 TCG_OPF_SIDE_EFFECTS = 0x08,
1052 /* Instruction operands are 64-bits (otherwise 32-bits). */
1053 TCG_OPF_64BIT = 0x10,
1054 /* Instruction is optional and not implemented by the host, or insn
1055 is generic and should not be implemented by the host. */
1056 TCG_OPF_NOT_PRESENT = 0x20,
1057 /* Instruction operands are vectors. */
1058 TCG_OPF_VECTOR = 0x40,
1059 };
1061 typedef struct TCGOpDef {
1062 const char *name;
1063 uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
1064 uint8_t flags;
1065 TCGArgConstraint *args_ct;
1066 int *sorted_args;
1067 #if defined(CONFIG_DEBUG_TCG)
1068 int used;
1069 #endif
1070 } TCGOpDef;
1072 extern TCGOpDef tcg_op_defs[];
1073 extern const size_t tcg_op_defs_max;
1075 typedef struct TCGTargetOpDef {
1076 TCGOpcode op;
1077 const char *args_ct_str[TCG_MAX_OP_ARGS];
1078 } TCGTargetOpDef;
1080 #define tcg_abort() \
1081 do {\
1082 fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
1083 abort();\
1084 } while (0)
1086 bool tcg_op_supported(TCGOpcode op);
1088 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
1090 TCGOp *tcg_emit_op(TCGOpcode opc);
1091 void tcg_op_remove(TCGContext *s, TCGOp *op);
1092 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
1093 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
1095 void tcg_optimize(TCGContext *s);
1097 TCGv_i32 tcg_const_i32(int32_t val);
1098 TCGv_i64 tcg_const_i64(int64_t val);
1099 TCGv_i32 tcg_const_local_i32(int32_t val);
1100 TCGv_i64 tcg_const_local_i64(int64_t val);
1101 TCGv_vec tcg_const_zeros_vec(TCGType);
1102 TCGv_vec tcg_const_ones_vec(TCGType);
1103 TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
1104 TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
1106 #if UINTPTR_MAX == UINT32_MAX
1107 # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
1108 # define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
1109 #else
1110 # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
1111 # define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
1112 #endif
1114 TCGLabel *gen_new_label(void);
1117 * label_arg
1118 * @l: label
1120 * Encode a label for storage in the TCG opcode stream.
1123 static inline TCGArg label_arg(TCGLabel *l)
1125 return (uintptr_t)l;
1129 * arg_label
1130 * @i: value
1132 * The opposite of label_arg. Retrieve a label from the
1133 * encoding of the TCG opcode stream.
1136 static inline TCGLabel *arg_label(TCGArg i)
1138 return (TCGLabel *)(uintptr_t)i;
1142 * tcg_ptr_byte_diff
1143 * @a, @b: addresses to be differenced
1145 * There are many places within the TCG backends where we need a byte
1146 * difference between two pointers. While this can be accomplished
1147 * with local casting, it's easy to get wrong -- especially if one is
1148 * concerned with the signedness of the result.
1150 * This version relies on GCC's void pointer arithmetic to get the
1151 * correct result.
1154 static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
1156 return a - b;
1160 * tcg_pcrel_diff
1161 * @s: the tcg context
1162 * @target: address of the target
1164 * Produce a pc-relative difference, from the current code_ptr
1165 * to the destination address.
1168 static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
1170 return tcg_ptr_byte_diff(target, s->code_ptr);
1174 * tcg_current_code_size
1175 * @s: the tcg context
1177 * Compute the current code size within the translation block.
1178 * This is used to fill in qemu's data structures for goto_tb.
1181 static inline size_t tcg_current_code_size(TCGContext *s)
1183 return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
1186 /* Combine the TCGMemOp and mmu_idx parameters into a single value. */
1187 typedef uint32_t TCGMemOpIdx;
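/*
 * Editorial illustration: make_memop_idx() below packs the memop into the
 * upper bits and the mmu index into the low four bits, so
 *
 *   TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *   get_memop(oi)   -> MO_TEUL
 *   get_mmuidx(oi)  -> 1
 */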
1190 * make_memop_idx
1191 * @op: memory operation
1192 * @idx: mmu index
1194 * Encode these values into a single parameter.
1196 static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
1198 tcg_debug_assert(idx <= 15);
1199 return (op << 4) | idx;
1203 * get_memop
1204 * @oi: combined op/idx parameter
1206 * Extract the memory operation from the combined value.
1208 static inline TCGMemOp get_memop(TCGMemOpIdx oi)
1210 return oi >> 4;
1214 * get_mmuidx
1215 * @oi: combined op/idx parameter
1217 * Extract the mmu index from the combined value.
1219 static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1221 return oi & 15;
1224 /**
1225 * tcg_qemu_tb_exec:
1226 * @env: pointer to CPUArchState for the CPU
1227 * @tb_ptr: address of generated code for the TB to execute
1229 * Start executing code from a given translation block.
1230 * Where translation blocks have been linked, execution
1231 * may proceed from the given TB into successive ones.
1232 * Control eventually returns only when some action is needed
1233 * from the top-level loop: either control must pass to a TB
1234 * which has not yet been directly linked, or an asynchronous
1235 * event such as an interrupt needs handling.
1237 * Return: The return value is the value passed to the corresponding
1238 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1239 * The value is either zero or a 4-byte aligned pointer to that TB combined
1240 * with additional information in its two least significant bits. The
1241 * additional information is encoded as follows:
1242 * 0, 1: the link between this TB and the next is via the specified
1243 * TB index (0 or 1). That is, we left the TB via (the equivalent
1244 * of) "goto_tb <index>". The main loop uses this to determine
1245 * how to link the TB just executed to the next.
1246 * 2: we are using instruction counting code generation, and we
1247 * did not start executing this TB because the instruction counter
1248 * would hit zero midway through it. In this case the pointer
1249 * returned is the TB we were about to execute, and the caller must
1250 * arrange to execute the remaining count of instructions.
1251 * 3: we stopped because the CPU's exit_request flag was set
1252 * (usually meaning that there is an interrupt that needs to be
1253 * handled). The pointer returned is the TB we were about to execute
1254 * when we noticed the pending exit request.
1256 * If the bottom two bits indicate an exit-via-index then the CPU
1257 * state is correctly synchronised and ready for execution of the next
1258 * TB (and in particular the guest PC is the address to execute next).
1259 * Otherwise, we gave up on execution of this TB before it started, and
1260 * the caller must fix up the CPU state by calling the CPU's
1261 * synchronize_from_tb() method with the TB pointer we return (falling
1262 back to calling the CPU's set_pc method with tb->pc if no
1263 * synchronize_from_tb() method exists).
1265 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1266 * to this default (which just calls the prologue.code emitted by
1267 * tcg_target_qemu_prologue()).
1268 */
1269 #define TB_EXIT_MASK 3
1270 #define TB_EXIT_IDX0 0
1271 #define TB_EXIT_IDX1 1
1272 #define TB_EXIT_IDXMAX 1
1273 #define TB_EXIT_REQUESTED 3
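/*
 * Editorial sketch of how a caller decodes the return value (the real code
 * lives in cpu_tb_exec() in accel/tcg/cpu-exec.c, this is only a rough sketch):
 *
 *   uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *   TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *   int tb_exit = ret & TB_EXIT_MASK;  // TB_EXIT_IDX0 / TB_EXIT_IDX1 / TB_EXIT_REQUESTED
 */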
1275 #ifdef HAVE_TCG_QEMU_TB_EXEC
1276 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
1277 #else
1278 # define tcg_qemu_tb_exec(env, tb_ptr) \
1279 ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
1280 #endif
1282 void tcg_register_jit(void *buf, size_t buf_size);
1284 #if TCG_TARGET_MAYBE_vec
1285 /* Return zero if the tuple (opc, type, vece) is unsupportable;
1286 return > 0 if it is directly supportable;
1287 return < 0 if we must call tcg_expand_vec_op. */
1288 int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
1289 #else
1290 static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
1292 return 0;
1294 #endif
1296 /* Expand the tuple (opc, type, vece) on the given arguments. */
1297 void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
1299 /* Replicate a constant C according to the log2 of the element size. */
1300 uint64_t dup_const(unsigned vece, uint64_t c);
1302 #define dup_const(VECE, C) \
1303 (__builtin_constant_p(VECE) \
1304 ? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \
1305 : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \
1306 : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \
1307 : dup_const(VECE, C)) \
1308 : dup_const(VECE, C))
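/*
 * Editorial illustration: the macro form folds constants at compile time, e.g.
 *   dup_const(MO_8, 0xab)     == 0xababababababababull
 *   dup_const(MO_16, 0x1234)  == 0x1234123412341234ull
 */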
1311 /*
1312 * Memory helpers that will be used by TCG generated code.
1313 */
1314 #ifdef CONFIG_SOFTMMU
1315 /* Value zero-extended to tcg register size. */
1316 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1317 TCGMemOpIdx oi, uintptr_t retaddr);
1318 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1319 TCGMemOpIdx oi, uintptr_t retaddr);
1320 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1321 TCGMemOpIdx oi, uintptr_t retaddr);
1322 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1323 TCGMemOpIdx oi, uintptr_t retaddr);
1324 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1325 TCGMemOpIdx oi, uintptr_t retaddr);
1326 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1327 TCGMemOpIdx oi, uintptr_t retaddr);
1328 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1329 TCGMemOpIdx oi, uintptr_t retaddr);
1331 /* Value sign-extended to tcg register size. */
1332 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1333 TCGMemOpIdx oi, uintptr_t retaddr);
1334 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1335 TCGMemOpIdx oi, uintptr_t retaddr);
1336 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1337 TCGMemOpIdx oi, uintptr_t retaddr);
1338 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1339 TCGMemOpIdx oi, uintptr_t retaddr);
1340 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1341 TCGMemOpIdx oi, uintptr_t retaddr);
1343 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1344 TCGMemOpIdx oi, uintptr_t retaddr);
1345 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1346 TCGMemOpIdx oi, uintptr_t retaddr);
1347 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1348 TCGMemOpIdx oi, uintptr_t retaddr);
1349 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1350 TCGMemOpIdx oi, uintptr_t retaddr);
1351 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1352 TCGMemOpIdx oi, uintptr_t retaddr);
1353 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1354 TCGMemOpIdx oi, uintptr_t retaddr);
1355 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1356 TCGMemOpIdx oi, uintptr_t retaddr);
1358 uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1359 TCGMemOpIdx oi, uintptr_t retaddr);
1360 uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1361 TCGMemOpIdx oi, uintptr_t retaddr);
1362 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1363 TCGMemOpIdx oi, uintptr_t retaddr);
1364 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1365 TCGMemOpIdx oi, uintptr_t retaddr);
1366 uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1367 TCGMemOpIdx oi, uintptr_t retaddr);
1368 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1369 TCGMemOpIdx oi, uintptr_t retaddr);
1370 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1371 TCGMemOpIdx oi, uintptr_t retaddr);
1373 /* Temporary aliases until backends are converted. */
1374 #ifdef TARGET_WORDS_BIGENDIAN
1375 # define helper_ret_ldsw_mmu helper_be_ldsw_mmu
1376 # define helper_ret_lduw_mmu helper_be_lduw_mmu
1377 # define helper_ret_ldsl_mmu helper_be_ldsl_mmu
1378 # define helper_ret_ldul_mmu helper_be_ldul_mmu
1379 # define helper_ret_ldl_mmu helper_be_ldul_mmu
1380 # define helper_ret_ldq_mmu helper_be_ldq_mmu
1381 # define helper_ret_stw_mmu helper_be_stw_mmu
1382 # define helper_ret_stl_mmu helper_be_stl_mmu
1383 # define helper_ret_stq_mmu helper_be_stq_mmu
1384 # define helper_ret_ldw_cmmu helper_be_ldw_cmmu
1385 # define helper_ret_ldl_cmmu helper_be_ldl_cmmu
1386 # define helper_ret_ldq_cmmu helper_be_ldq_cmmu
1387 #else
1388 # define helper_ret_ldsw_mmu helper_le_ldsw_mmu
1389 # define helper_ret_lduw_mmu helper_le_lduw_mmu
1390 # define helper_ret_ldsl_mmu helper_le_ldsl_mmu
1391 # define helper_ret_ldul_mmu helper_le_ldul_mmu
1392 # define helper_ret_ldl_mmu helper_le_ldul_mmu
1393 # define helper_ret_ldq_mmu helper_le_ldq_mmu
1394 # define helper_ret_stw_mmu helper_le_stw_mmu
1395 # define helper_ret_stl_mmu helper_le_stl_mmu
1396 # define helper_ret_stq_mmu helper_le_stq_mmu
1397 # define helper_ret_ldw_cmmu helper_le_ldw_cmmu
1398 # define helper_ret_ldl_cmmu helper_le_ldl_cmmu
1399 # define helper_ret_ldq_cmmu helper_le_ldq_cmmu
1400 #endif
1402 uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
1403 uint32_t cmpv, uint32_t newv,
1404 TCGMemOpIdx oi, uintptr_t retaddr);
1405 uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
1406 uint32_t cmpv, uint32_t newv,
1407 TCGMemOpIdx oi, uintptr_t retaddr);
1408 uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
1409 uint32_t cmpv, uint32_t newv,
1410 TCGMemOpIdx oi, uintptr_t retaddr);
1411 uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
1412 uint64_t cmpv, uint64_t newv,
1413 TCGMemOpIdx oi, uintptr_t retaddr);
1414 uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
1415 uint32_t cmpv, uint32_t newv,
1416 TCGMemOpIdx oi, uintptr_t retaddr);
1417 uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
1418 uint32_t cmpv, uint32_t newv,
1419 TCGMemOpIdx oi, uintptr_t retaddr);
1420 uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
1421 uint64_t cmpv, uint64_t newv,
1422 TCGMemOpIdx oi, uintptr_t retaddr);
1424 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
1425 TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
1426 (CPUArchState *env, target_ulong addr, TYPE val, \
1427 TCGMemOpIdx oi, uintptr_t retaddr);
1429 #ifdef CONFIG_ATOMIC64
1430 #define GEN_ATOMIC_HELPER_ALL(NAME) \
1431 GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
1432 GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
1433 GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
1434 GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
1435 GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
1436 GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
1437 GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
1438 #else
1439 #define GEN_ATOMIC_HELPER_ALL(NAME) \
1440 GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
1441 GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
1442 GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
1443 GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
1444 GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
1445 #endif
1447 GEN_ATOMIC_HELPER_ALL(fetch_add)
1448 GEN_ATOMIC_HELPER_ALL(fetch_sub)
1449 GEN_ATOMIC_HELPER_ALL(fetch_and)
1450 GEN_ATOMIC_HELPER_ALL(fetch_or)
1451 GEN_ATOMIC_HELPER_ALL(fetch_xor)
1452 GEN_ATOMIC_HELPER_ALL(fetch_smin)
1453 GEN_ATOMIC_HELPER_ALL(fetch_umin)
1454 GEN_ATOMIC_HELPER_ALL(fetch_smax)
1455 GEN_ATOMIC_HELPER_ALL(fetch_umax)
1457 GEN_ATOMIC_HELPER_ALL(add_fetch)
1458 GEN_ATOMIC_HELPER_ALL(sub_fetch)
1459 GEN_ATOMIC_HELPER_ALL(and_fetch)
1460 GEN_ATOMIC_HELPER_ALL(or_fetch)
1461 GEN_ATOMIC_HELPER_ALL(xor_fetch)
1462 GEN_ATOMIC_HELPER_ALL(smin_fetch)
1463 GEN_ATOMIC_HELPER_ALL(umin_fetch)
1464 GEN_ATOMIC_HELPER_ALL(smax_fetch)
1465 GEN_ATOMIC_HELPER_ALL(umax_fetch)
1467 GEN_ATOMIC_HELPER_ALL(xchg)
1469 #undef GEN_ATOMIC_HELPER_ALL
1470 #undef GEN_ATOMIC_HELPER
1471 #endif /* CONFIG_SOFTMMU */
1473 /*
1474 * These aren't really "proper" helpers, because TCG cannot manage Int128.
1475 * However, use the same format as the others, for use by the backends.
1477 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
1478 * the ld/st functions are only defined if HAVE_ATOMIC128,
1479 * as defined by <qemu/atomic128.h>.
1480 */
1481 Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
1482 Int128 cmpv, Int128 newv,
1483 TCGMemOpIdx oi, uintptr_t retaddr);
1484 Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
1485 Int128 cmpv, Int128 newv,
1486 TCGMemOpIdx oi, uintptr_t retaddr);
1488 Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
1489 TCGMemOpIdx oi, uintptr_t retaddr);
1490 Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
1491 TCGMemOpIdx oi, uintptr_t retaddr);
1492 void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1493 TCGMemOpIdx oi, uintptr_t retaddr);
1494 void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1495 TCGMemOpIdx oi, uintptr_t retaddr);
1497 #ifdef CONFIG_DEBUG_TCG
1498 void tcg_assert_listed_vecop(TCGOpcode);
1499 #else
1500 static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
1501 #endif
1503 static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
1505 #ifdef CONFIG_DEBUG_TCG
1506 const TCGOpcode *o = tcg_ctx->vecop_list;
1507 tcg_ctx->vecop_list = n;
1508 return o;
1509 #else
1510 return NULL;
1511 #endif
1514 bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
1516 #endif /* TCG_H */