/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
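
/*
 * Added note (summarised from the decoders below): each TCI instruction is a
 * single 32-bit word with the opcode in bits [7:0] and operand fields packed
 * from bit 8 upward; tci_args_rrr(), for instance, reads three 4-bit register
 * numbers from bits [11:8], [15:12] and [19:16].
 */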

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
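
/*
 * Added note: both comparators above use the same TCGCond encoding; the
 * signed conditions reinterpret the operands as int32_t/int64_t, and
 * TSTEQ/TSTNE test the bitwise AND of the operands against zero.
 */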

static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}
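
/*
 * Added note: both wrappers above dispatch on the MemOp size/signedness to
 * the out-of-line memory helpers, passing tb_ptr as the "return address"
 * (presumably so the slow path can attribute a fault to the current TB).
 */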

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
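
/*
 * Added note: CASE_32_64(op) expands to both the _i32 and _i64 case labels
 * when the host word size is 64 bits, so a single handler body serves both
 * operand widths; on 32-bit hosts only the _i32 label is emitted and
 * CASE_64() expands to nothing.
 */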

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);
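
    /*
     * Added note on the entry state: the CPU env pointer is placed in the
     * dedicated TCG_AREG0 register and the interpreter's private stack,
     * used below for helper-call arguments, is exposed through
     * TCG_REG_CALL_STACK.
     */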

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_ld_i32;
        case INDEX_op_qemu_ld_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_ld_i32:
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_ld_i64;
        case INDEX_op_qemu_ld_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_st_i32;
        case INDEX_op_qemu_st_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = tci_uint64(regs[r2], regs[r1]);
                oi = regs[r3];
            }
        do_st_i32:
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = (uint32_t)regs[r2];
                oi = regs[r3];
            }
            goto do_st_i64;
        case INDEX_op_qemu_st_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;
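
        /*
         * Added note on the guest-memory cases above: the _a32/_a64 variants
         * differ only in how the guest address is fetched (32-bit addresses
         * are zero-extended); on 32-bit hosts a 64-bit address or data value
         * arrives as a register pair, hence the extra register operands and
         * the tci_uint64()/tci_write_reg64() conversions.
         */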

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}
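
/*
 * Added note: register r14 doubles as the env pointer and r15 as the call
 * stack pointer, which is why they are printed as "env" and "sp" above and
 * checked by the QEMU_BUILD_BUG_ON assertions.
 */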

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
        [TCG_COND_TSTEQ] = "tsteq",
        [TCG_COND_TSTNE] = "tstne",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;
1091 case INDEX_op_br:
1092 case INDEX_op_exit_tb:
1093 case INDEX_op_goto_tb:
1094 tci_args_l(insn, tb_ptr, &ptr);
1095 info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
1096 break;
1098 case INDEX_op_goto_ptr:
1099 tci_args_r(insn, &r0);
1100 info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
1101 break;
1103 case INDEX_op_call:
1104 tci_args_nl(insn, tb_ptr, &len, &ptr);
1105 info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
1106 break;
1108 case INDEX_op_brcond_i32:
1109 case INDEX_op_brcond_i64:
1110 tci_args_rl(insn, tb_ptr, &r0, &ptr);
1111 info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
1112 op_name, str_r(r0), ptr);
1113 break;
1115 case INDEX_op_setcond_i32:
1116 case INDEX_op_setcond_i64:
1117 tci_args_rrrc(insn, &r0, &r1, &r2, &c);
1118 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
1119 op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
1120 break;
1122 case INDEX_op_tci_movi:
1123 tci_args_ri(insn, &r0, &i1);
1124 info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
1125 op_name, str_r(r0), i1);
1126 break;
1128 case INDEX_op_tci_movl:
1129 tci_args_rl(insn, tb_ptr, &r0, &ptr);
1130 info->fprintf_func(info->stream, "%-12s %s, %p",
1131 op_name, str_r(r0), ptr);
1132 break;
1134 case INDEX_op_ld8u_i32:
1135 case INDEX_op_ld8u_i64:
1136 case INDEX_op_ld8s_i32:
1137 case INDEX_op_ld8s_i64:
1138 case INDEX_op_ld16u_i32:
1139 case INDEX_op_ld16u_i64:
1140 case INDEX_op_ld16s_i32:
1141 case INDEX_op_ld16s_i64:
1142 case INDEX_op_ld32u_i64:
1143 case INDEX_op_ld32s_i64:
1144 case INDEX_op_ld_i32:
1145 case INDEX_op_ld_i64:
1146 case INDEX_op_st8_i32:
1147 case INDEX_op_st8_i64:
1148 case INDEX_op_st16_i32:
1149 case INDEX_op_st16_i64:
1150 case INDEX_op_st32_i64:
1151 case INDEX_op_st_i32:
1152 case INDEX_op_st_i64:
1153 tci_args_rrs(insn, &r0, &r1, &s2);
1154 info->fprintf_func(info->stream, "%-12s %s, %s, %d",
1155 op_name, str_r(r0), str_r(r1), s2);
1156 break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_st_a32_i32:
        len = 1 + 1;
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a64_i32:
        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a64_i64:
        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    do_qemu_ldst:
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}