/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
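
/* Illustrative example (not part of the original header): a register set
 * is just a bitmask indexed by host register number.  Assuming the target
 * backend's TCGReg enum defines TCG_REG_R0 and TCG_REG_R1:
 *
 *     TCGRegSet set = 0;
 *     tcg_regset_set_reg(set, TCG_REG_R0);    // set bit for R0
 *     tcg_regset_set_reg(set, TCG_REG_R1);    // set bit for R1
 *     tcg_regset_reset_reg(set, TCG_REG_R0);  // clear R0 again
 *     assert(tcg_regset_test_reg(set, TCG_REG_R1));
 */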
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif

#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif
typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;

typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * more strictly than the size of the memory access, while others
     * (e.g. SPARCv9) need an address that is aligned, but less strictly
     * than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of a memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes).
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
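
/* Illustrative example (not part of the original header): memops compose
 * by OR-ing together a size, signedness, endianness and alignment
 * constraint.  A target-endian, sign-extended 16-bit load that must be
 * naturally aligned would be written as:
 *
 *     TCGMemOp memop = MO_TESW | MO_ALIGN;   // == MO_TE | MO_SIGN | MO_16
 *
 * and a little-endian 64-bit access requiring only 4-byte alignment as:
 *
 *     TCGMemOp memop = MO_LEQ | MO_ALIGN_4;
 */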
/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
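
/* Illustrative example (not part of the original header): the value
 * returned is log2 of the required alignment in bytes, so
 *
 *     get_alignment_bits(MO_UNALN | MO_64)   == 0   // no requirement
 *     get_alignment_bits(MO_ALIGN | MO_32)   == 2   // natural, 4 bytes
 *     get_alignment_bits(MO_ALIGN_16 | MO_8) == 4   // explicit, 16 bytes
 */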
typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS.  */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals.  */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used.  */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Create a "signed" version of an "unsigned" comparison.  */
static inline TCGCond tcg_signed_cond(TCGCond c)
{
    return c & 4 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
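
/* Illustrative example (not part of the original header): how the bit
 * layout above drives these helpers.
 *
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE   // flip bit 0
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT   // a < b  <=>  b > a
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT   // drop the equality
 */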
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead
       at the end of translation blocks.  If false, the temp is
       dead at the end of basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];
} TCGOp;

#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
typedef struct TCGProfile {
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t temp_count;
    int temp_count_max;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_prologue;
    void *code_gen_epilogue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.inc.c.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    struct TCGLabelQemuLdst *ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(TCGOpHead, TCGOp) ops, free_ops;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
extern TCGv_env cpu_env;

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}
static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif
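
/* Illustrative example (not part of the original header): TCGv_* handles
 * round-trip losslessly through TCGTemp and TCGArg, since all three are
 * just different encodings of the same temporary:
 *
 *     TCGv_i32 v = ...;                   // some valid handle
 *     TCGTemp *t = tcgv_i32_temp(v);      // decode to the temp
 *     TCGArg   a = temp_arg(t);           // encode for an op's args[]
 *     assert(arg_temp(a) == t);
 *     assert(temp_tcgv_i32(t) == v);
 */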
static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops, TCGOpHead);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return false;
}
/* pool based memory allocation */

/* user-mode: tb_lock must be held for tcg_malloc_internal.  */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_init(void);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

/* user-mode: Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
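
/* Illustrative example (not part of the original header): the fast path
 * above bumps a pointer inside the current pool chunk; only when the
 * chunk is exhausted does tcg_malloc_internal() link in a new TCGPool.
 * A typical use is a short-lived, per-translation scratch allocation:
 *
 *     TCGArgConstraint *ct = tcg_malloc(sizeof(*ct) * nb_args);
 *
 * Pool memory is reclaimed wholesale by tcg_pool_reset(), never freed
 * individually.
 */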
void tcg_context_init(TCGContext *s);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
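
/* Illustrative example (not part of the original header): a front end
 * typically creates globals once at init time, backed by CPU state, and
 * short-lived temps while translating.  Assuming a hypothetical
 * CPUFooState with a 32-bit field 'r0':
 *
 *     TCGv_i32 cpu_r0 = tcg_global_mem_new_i32(cpu_env,
 *                           offsetof(CPUFooState, r0), "r0");
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     ...                                 // use tmp in tcg_gen_* ops
 *     tcg_temp_free_i32(tmp);             // temps must be freed
 */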
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

#if UINTPTR_MAX == UINT32_MAX
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif
bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
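
/* Illustrative example (not part of the original header): a front end
 * creates a label, encodes it into a branch op's arguments, and the
 * label can always be recovered from that encoding:
 *
 *     TCGLabel *l = gen_new_label();
 *     TCGArg a = label_arg(l);
 *     assert(arg_label(a) == l);
 */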
/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
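
/* Illustrative example (not part of the original header): the encoding
 * packs the mmu index into the low 4 bits, so the pair always
 * round-trips:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *     assert(get_memop(oi) == MO_TEUL);
 *     assert(get_mmuidx(oi) == 1);
 */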
/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits.  The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1).  That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>".  The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it.  In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled).  The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_REQUESTED 3

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
#endif
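
/* Illustrative example (not part of the original header): decoding the
 * return value in a top-level execution loop, as described above:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;
 *
 *     if (exit_reason == TB_EXIT_REQUESTED) {
 *         // interrupt or exit request pending; CPU state not yet synced
 *     } else {
 *         // TB_EXIT_IDX0/IDX1: left via goto_tb; the loop may patch
 *         // last_tb's jump slot <exit_reason> to chain TBs directly
 *     }
 */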
void tcg_register_jit(void *buf, size_t buf_size);

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)
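
/* Illustrative example (not part of the original header): each
 * GEN_ATOMIC_HELPER_ALL(NAME) line above declares one helper per
 * size/endianness combination.  For instance, GEN_ATOMIC_HELPER_ALL(xchg)
 * expands (in part) to the declaration:
 *
 *     uint32_t helper_atomic_xchgl_le_mmu(CPUArchState *env,
 *                                         target_ulong addr, uint32_t val,
 *                                         TCGMemOpIdx oi, uintptr_t retaddr);
 */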
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */
#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really "proper" helpers because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends.  */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */