/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;
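
/*
 * On a 32-bit host a 64-bit value occupies a pair of adjacent registers;
 * the two helpers below split such a value into a pair and join a pair
 * back into a value.
 */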
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
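
/*
 * For example, a three-register op packs its operands as
 *   insn[7:0]    opcode
 *   insn[11:8]   r0
 *   insn[15:12]  r1
 *   insn[19:16]  r2
 * which is exactly what tci_args_rrr() below unpacks with extract32().
 */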

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
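
/*
 * Guest memory accesses are funneled through the out-of-line load/store
 * helpers; the current insn pointer is passed as the "return address"
 * so a fault can be attributed to this TB.
 */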

static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
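
/*
 * E.g. CASE_32_64(add) expands to
 *     case INDEX_op_add_i64: case INDEX_op_add_i32:
 * on a 64-bit host, and to just the _i32 case label on a 32-bit host.
 */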

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);
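
    /*
     * Each operation is encoded in a single 32-bit word fetched below;
     * label and pointer operands are 20-bit offsets relative to the
     * word that follows (see tci_args_l above).
     */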
    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
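                /* Assign each argument one or more 8-byte stack slots. */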
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
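            /* Widen each register pair to 64 bits, add, then split back. */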
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
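            /* (T1 < regs[r2]) is the carry out of the low-half addition. */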
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
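            /* (regs[r2] < regs[r4]) is the borrow out of the low half. */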
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_ld_i32;
        case INDEX_op_qemu_ld_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
        do_ld_i32:
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = (uint32_t)regs[r2];
            }
            goto do_ld_i64;
        case INDEX_op_qemu_ld_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
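                /*
                 * Data and address already need four registers, and the
                 * 12-bit MemOpIdx no longer fits in the insn word, so it
                 * is passed in a fifth register.
                 */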
                oi = regs[r4];
            }
        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_a32_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = (uint32_t)regs[r1];
            goto do_st_i32;
        case INDEX_op_qemu_st_a64_i32:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
        do_st_i32:
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_a32_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = (uint32_t)regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = (uint32_t)regs[r2];
            }
            goto do_st_i64;
        case INDEX_op_qemu_st_a64_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_st_a32_i32:
        len = 1 + 1;
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a64_i32:
        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a64_i64:
        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
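    /*
     * len counts operand registers: one per value or address word, so a
     * 64-bit quantity needs two on a 32-bit host, as computed above with
     * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS).
     */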
    do_qemu_ldst:
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}