/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;
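
/*
 * Store a 64-bit value into a pair of 32-bit host registers; used on
 * hosts where TCG_TARGET_REG_BITS == 32 and for double-width results.
 */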
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
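
/* E.g. tci_uint64(0x12345678, 0x9abcdef0) == 0x123456789abcdef0. */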
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
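
/*
 * Illustrative layout (derived from the loaders below): bits [0,8) of
 * each 32-bit bytecode word hold the opcode, and register operands
 * occupy successive 4-bit fields starting at bit 8, so tci_args_rr()
 * reads r0 from bits [8,12) and r1 from bits [12,16).
 */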
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);

    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
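
/*
 * Evaluate a TCGCond on two operands, using a signed or an unsigned
 * comparison as the condition requires.
 */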
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    int32_t i0 = u0;
    int32_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        return u0 == u1;
    case TCG_COND_NE:
        return u0 != u1;
    case TCG_COND_LT:
        return i0 < i1;
    case TCG_COND_GE:
        return i0 >= i1;
    case TCG_COND_LE:
        return i0 <= i1;
    case TCG_COND_GT:
        return i0 > i1;
    case TCG_COND_LTU:
        return u0 < u1;
    case TCG_COND_GEU:
        return u0 >= u1;
    case TCG_COND_LEU:
        return u0 <= u1;
    case TCG_COND_GTU:
        return u0 > u1;
    default:
        g_assert_not_reached();
    }
}
static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    int64_t i0 = u0;
    int64_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        return u0 == u1;
    case TCG_COND_NE:
        return u0 != u1;
    case TCG_COND_LT:
        return i0 < i1;
    case TCG_COND_GE:
        return i0 >= i1;
    case TCG_COND_LE:
        return i0 <= i1;
    case TCG_COND_GT:
        return i0 > i1;
    case TCG_COND_LTU:
        return u0 < u1;
    case TCG_COND_GEU:
        return u0 >= u1;
    case TCG_COND_LEU:
        return u0 <= u1;
    case TCG_COND_GTU:
        return u0 > u1;
    default:
        g_assert_not_reached();
    }
}
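
/*
 * Guest load/store helpers: dispatch on the access size (and, for
 * loads, the signedness) encoded in the MemOpIdx, passing the current
 * bytecode address as the "return address" for exception unwinding.
 */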
static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}
static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
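
/*
 * For example, "CASE_32_64(add)" expands to "case INDEX_op_add_i64:
 * case INDEX_op_add_i32:" on a 64-bit host, but only to the _i32 case
 * on a 32-bit host, where CASE_64(x) expands to nothing.
 */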
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);
    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);
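
        /*
         * Dispatch on the low 8 bits of the instruction word; the
         * remaining bits encode the operands, unpacked by the
         * tci_args_* loaders above.
         */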
        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            break;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
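        /*
         * tci_movi and tci_movl are TCI-private opcodes: the former
         * carries a 20-bit signed immediate in the instruction word,
         * the latter references a full tcg_target_ulong stored in the
         * translation block.
         */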
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;
            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;
            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif
            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif
            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;
            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
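        /*
         * 128-bit add/sub from 64-bit halves: unsigned overflow of the
         * low half (T1 < regs[r2], resp. regs[r2] < regs[r4]) supplies
         * the carry/borrow into the high half.
         */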
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
863 case INDEX_op_deposit_i64
:
864 tci_args_rrrbb(insn
, &r0
, &r1
, &r2
, &pos
, &len
);
865 regs
[r0
] = deposit64(regs
[r1
], pos
, len
, regs
[r2
]);
868 #if TCG_TARGET_HAS_extract_i64
869 case INDEX_op_extract_i64
:
870 tci_args_rrbb(insn
, &r0
, &r1
, &pos
, &len
);
871 regs
[r0
] = extract64(regs
[r1
], pos
, len
);
874 #if TCG_TARGET_HAS_sextract_i64
875 case INDEX_op_sextract_i64
:
876 tci_args_rrbb(insn
, &r0
, &r1
, &pos
, &len
);
877 regs
[r0
] = sextract64(regs
[r1
], pos
, len
);
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;
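
            /*
             * Guest memory accesses: on 32-bit hosts a 64-bit address
             * or data value occupies a register pair, so the operand
             * decoding below varies with TCG_TARGET_REG_BITS.
             */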
        case INDEX_op_qemu_ld_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_ld_i32;
        case INDEX_op_qemu_ld_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
        do_ld_i32:
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = (uint32_t)regs[r2];
            }
            goto do_ld_i64;
        case INDEX_op_qemu_ld_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_st_i32;
        case INDEX_op_qemu_st_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
        do_st_i32:
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = (uint32_t)regs[r2];
            }
            goto do_st_i64;
        case INDEX_op_qemu_st_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}
static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}
/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);
    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;
    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;
    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;
    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;
    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_st_a32_i32:
        len = 1 + 1;
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a64_i32:
        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a64_i64:
        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    do_qemu_ldst:
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}