/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
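
/*
 * Example: on a 32-bit host the 64-bit value 0x1122334455667788 is held
 * as low = 0x55667788 and high = 0x11223344; tci_write_reg64() splits a
 * value that way across two registers and tci_uint64() reassembles it.
 */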

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
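
/*
 * Each TCI instruction is one 32-bit word: the TCGOpcode sits in bits
 * [7:0], and the remaining bits hold the operands, e.g. 4-bit register
 * numbers starting at bit 8 and, for branches, a 20-bit signed
 * displacement measured from the following instruction word.
 */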

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
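
/*
 * The signed conditions compare the reinterpreted i0/i1 values, the
 * unsigned ones the original u0/u1: e.g. 0xffffffff is less than 0 for
 * TCG_COND_LT (it reads as -1) but greater than 0 for TCG_COND_GTU.
 */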

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}
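
/*
 * With CONFIG_SOFTMMU the two helpers above defer to the TLB-aware
 * ld/st helpers; in the user-only build the guest address is mapped
 * with g2h(), checked by hand against the access's alignment mask and
 * then accessed directly through the ld*_p()/st*_p() primitives.
 */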

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
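
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *     case INDEX_op_add_i64: case INDEX_op_add_i32:
 * while on a 32-bit host only the _i32 label is emitted and CASE_64(x)
 * expands to nothing.
 */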

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TB's do not make any calls. In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;
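
            /*
             * Given libffi's ffi_call(cif, fn, rvalue, avalue) signature,
             * pptr[1] below is the ffi_cif describing this call and
             * pptr[0] is the helper function to invoke; both come from
             * the descriptor that ptr points at.
             */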
            {
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;
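
            /*
             * The ld/st opcodes above operate on host addresses
             * (regs[r1] + ofs); they are typically used to access
             * fields of CPUArchState via the env register.
             */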

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
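            /* Carry out of the low half: the sum wrapped iff T1 < regs[r2]. */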
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
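            /* Borrow when the low-half subtraction underflows. */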
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
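            /*
             * ptr addresses this TB's jump slot; the indirect load below
             * picks up whatever bytecode address is currently stored
             * there, which changes as TBs are chained and un-chained.
             */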
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
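                /*
                 * 32-bit host, 64-bit guest address: there is no room
                 * left in the insn word for the MemOpIdx, so it is
                 * passed in a fifth register instead.
                 */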
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
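        /*
         * len now counts the operand registers in use: one or two for
         * the value plus one or two for the guest address, selecting
         * between the rrm, rrrm and rrrrr argument formats below.
         */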
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}