/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "tcg/tcg.h"
22 #include "tcg/tcg-ldst.h"
23 #include <ffi.h>
27 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
28 * Without assertions, the interpreter runs much faster.
30 #if defined(CONFIG_DEBUG_TCG)
31 # define tci_assert(cond) assert(cond)
32 #else
33 # define tci_assert(cond) ((void)(cond))
34 #endif
36 __thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
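
/*
 * These two helpers define the register-pair convention used on 32-bit
 * hosts: tci_write_reg64() stores the low half of a 64-bit value into
 * low_index and the high half into high_index, while tci_uint64() rebuilds
 * a 64-bit value from such a (high, low) pair.  See e.g. the add2/sub2 and
 * qemu_ld/st cases in the interpreter loop below.
 */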

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
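
/*
 * Each TCI instruction is a single 32-bit word: the opcode lives in bits
 * 0-7 (see extract32(insn, 0, 8) in the interpreter loop) and operands are
 * packed above it; for example an "rrr" format uses three 4-bit register
 * fields at bits 8, 12 and 16.  The 20-bit "l" fields are signed byte
 * offsets applied to the instruction pointer, so branch targets and pooled
 * constants must stay within roughly +/-512 KiB of the bytecode.
 */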

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}
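
/*
 * Guest memory accesses are funneled through the common helper_ld/st*_mmu
 * routines, which perform the guest-to-host address translation (softmmu
 * TLB or user-mode mapping) and may raise guest exceptions.  The "ra"
 * value passed in is the bytecode address, which the helpers use as the
 * fault return address; this is why tb_ptr is threaded through here.
 */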

static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
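
/*
 * CASE_32_64(op) expands to the case labels for both INDEX_op_<op>_i64 and
 * INDEX_op_<op>_i32 on 64-bit hosts, but only the _i32 label on 32-bit
 * hosts, where CASE_64(op) disappears entirely.  This lets one interpreter
 * body serve both host register widths.
 */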

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);
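
    /*
     * The dispatch loop below models the TCI register file in regs[]:
     * TCG_AREG0 (env) and TCG_REG_CALL_STACK (sp) were seeded above, and
     * the stack[] buffer serves both as the call-argument area handed to
     * libffi and as the static frame addressed through the sp register.
     */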

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;
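
            /*
             * The "len" field of the call instruction encodes the helper's
             * return kind (0 = void, 1 = 32-bit, 2 = 64-bit, 3 = Int128),
             * presumably set up by the TCI backend when the call was
             * emitted; the cases above copy the libffi result back into
             * the fixed return registers starting at TCG_REG_R0.
             */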

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;
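
            /*
             * The three ops above leave or redirect the dispatch loop:
             * exit_tb returns the encoded pointer to the caller, goto_tb
             * reloads tb_ptr indirectly through the TB's jump slot (so the
             * target can be re-pointed when TBs are chained or unchained),
             * and goto_ptr either continues at a computed bytecode address
             * or returns 0 when there is none.
             */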

        case INDEX_op_qemu_ld_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_ld_i32;
        case INDEX_op_qemu_ld_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_ld_i32:
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_ld_i64;
        case INDEX_op_qemu_ld_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_st_i32;
        case INDEX_op_qemu_st_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_st_i32:
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_st_i64;
        case INDEX_op_qemu_st_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}
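
/*
 * Note: the non-zero values returned above come from INDEX_op_exit_tb; as
 * with the native TCG backends, the caller (cpu_tb_exec()) decodes the
 * result into the last TranslationBlock pointer plus TB_EXIT_* flags held
 * in the low bits.
 */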

/* Disassembler that matches the interpreter. */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_st_a32_i32:
        len = 1 + 1;
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a64_i32:
        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a64_i64:
        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    do_qemu_ldst:
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}