tcg/tci: Implement andc, orc, eqv, nand, nor
[qemu/ar7.git] / tcg / tci.c
blob8af82c7da7c19a1d90f20a50a838c2b77610e1f0
1 /*
2 * Tiny Code Interpreter for QEMU
4 * Copyright (c) 2009, 2011, 2016 Stefan Weil
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu-common.h"
22 #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
23 #include "exec/cpu_ldst.h"
24 #include "tcg/tcg-op.h"
25 #include "qemu/compiler.h"
26 #include <ffi.h>
/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
/* Still evaluate 'cond' so side effects and "unused" warnings behave
   the same in both configurations. */
# define tci_assert(cond) ((void)(cond))
#endif
39 __thread uintptr_t tci_tb_ptr;
/*
 * Write a 64-bit value into a register pair: the low half into
 * regs[low_index] and the high 32 bits into regs[high_index].
 * Only reached from TCG_TARGET_REG_BITS == 32 code paths.
 */
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = value;
    regs[high_index] = value >> 32;
}
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    uint64_t hi64 = high;

    /* The low word occupies bits [31:0], the high word bits [63:32]. */
    return (hi64 << 32) | low;
}
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (TCGMemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

/* Decode a 20-bit signed displacement into an absolute code pointer;
   a zero displacement encodes NULL. */
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);

    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}
/* Decode one register operand from bits [11:8]. */
static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}
/* Decode a call-return-length immediate and a code pointer
   (signed 20-bit displacement relative to tb_ptr). */
static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}
/* Decode one register operand plus a tb_ptr-relative code pointer. */
static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}
/* Decode two register operands. */
static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}
/* Decode one register operand and a 20-bit sign-extended immediate. */
static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}
/* Decode two register operands and a 12-bit memory-op index. */
static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, TCGMemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}
/* Decode three register operands. */
static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}
/* Decode two register operands and a 16-bit signed load/store offset. */
static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}
/* Decode three register operands and a comparison condition. */
static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}
/* Decode three register operands and a 12-bit memory-op index. */
static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}
/* Decode three registers plus two 6-bit bit-position immediates
   (used by the deposit operations: position and length). */
static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}
/* Decode five register operands. */
static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}
#if TCG_TARGET_REG_BITS == 32
/* Decode four register operands (32-bit hosts only, e.g. mulu2). */
static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}
#endif
/* Decode five register operands and a comparison condition
   (movcond, setcond2). */
static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}
#if TCG_TARGET_REG_BITS == 32
/* Decode six register operands (32-bit hosts only, e.g. add2/sub2). */
static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
#endif
202 static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
204 bool result = false;
205 int32_t i0 = u0;
206 int32_t i1 = u1;
207 switch (condition) {
208 case TCG_COND_EQ:
209 result = (u0 == u1);
210 break;
211 case TCG_COND_NE:
212 result = (u0 != u1);
213 break;
214 case TCG_COND_LT:
215 result = (i0 < i1);
216 break;
217 case TCG_COND_GE:
218 result = (i0 >= i1);
219 break;
220 case TCG_COND_LE:
221 result = (i0 <= i1);
222 break;
223 case TCG_COND_GT:
224 result = (i0 > i1);
225 break;
226 case TCG_COND_LTU:
227 result = (u0 < u1);
228 break;
229 case TCG_COND_GEU:
230 result = (u0 >= u1);
231 break;
232 case TCG_COND_LEU:
233 result = (u0 <= u1);
234 break;
235 case TCG_COND_GTU:
236 result = (u0 > u1);
237 break;
238 default:
239 g_assert_not_reached();
241 return result;
244 static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
246 bool result = false;
247 int64_t i0 = u0;
248 int64_t i1 = u1;
249 switch (condition) {
250 case TCG_COND_EQ:
251 result = (u0 == u1);
252 break;
253 case TCG_COND_NE:
254 result = (u0 != u1);
255 break;
256 case TCG_COND_LT:
257 result = (i0 < i1);
258 break;
259 case TCG_COND_GE:
260 result = (i0 >= i1);
261 break;
262 case TCG_COND_LE:
263 result = (i0 <= i1);
264 break;
265 case TCG_COND_GT:
266 result = (i0 > i1);
267 break;
268 case TCG_COND_LTU:
269 result = (u0 < u1);
270 break;
271 case TCG_COND_GEU:
272 result = (u0 >= u1);
273 break;
274 case TCG_COND_LEU:
275 result = (u0 <= u1);
276 break;
277 case TCG_COND_GTU:
278 result = (u0 > u1);
279 break;
280 default:
281 g_assert_not_reached();
283 return result;
/*
 * Shorthands for the guest memory access helpers.  Each expands into a
 * cpu_ld* / cpu_st* call using the interpreter-loop locals 'env', 'taddr'
 * and 'oi', with the current bytecode position as the retaddr argument.
 */
#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)

/*
 * Case-label helpers: CASE_32_64 emits labels for both the _i32 and _i64
 * flavor of an opcode (the _i64 one only on 64-bit hosts); CASE_64 emits
 * the _i64 label only on 64-bit hosts and nothing otherwise.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    /* Bytecode stream: one 32-bit word per insn (plus in-stream data). */
    const uint32_t *tb_ptr = v_tb_ptr;
    /* Simulated TCG register file. */
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    /* Combined helper-call argument area and static frame. */
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    /* libffi avalue pointers, one per 64-bit stack slot. */
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        TCGReg r5;
        uint64_t T1, T2;
#endif
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TB's do not make any calls. In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            {
                /* ptr points to a (helper function, ffi_cif) pair. */
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            /* Mask the shift count to avoid UB for counts >= 32. */
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
        case INDEX_op_brcond_i32:
            /* Condition pre-folded by the encoder: branch if r0 != 0. */
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tci_write_reg64(regs, r1, r0, (uint64_t)regs[r2] * regs[r3]);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                /* NULL target: return to the main loop. */
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                /* Five-operand form: the memop index rides in a register. */
                oi = regs[r4];
            }
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp32);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp32);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp32);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp32);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp32);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                /* The 64-bit value to store lives in the r1:r0 pair. */
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Disassembler that matches the interpreter
 */

/* Return a printable name for a TCI register. */
static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    /* The two aliased names above must match the fixed register numbers. */
    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}
/* Return a printable name for a TCGCond; aborts on unnamed values. */
static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    /* Entries not listed above are zero-initialized, hence empty. */
    assert(cond[c][0] != 0);
    return cond[c];
}
/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4;
#if TCG_TARGET_REG_BITS == 32
    TCGReg r5;
#endif
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    TCGMemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    /* Dispatch on the operand-decoding format of each opcode, mirroring
       the tci_args_* decoders used by the interpreter. */
    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_mulu2_i32:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
#endif

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        /* The operand count depends on how many host registers a
           64-bit value and a guest address each occupy. */
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}