/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "tcg/tcg.h"           /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
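
/*
 * For example, tci_uint64(0x00000001, 0x00000002) yields
 * 0x0000000100000002; tci_write_reg64() above performs the inverse,
 * splitting a 64-bit value back into a low/high register pair.
 */
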
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
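
/*
 * Illustration (the layout follows directly from the extract32() calls
 * below): every TCI instruction is a single uint32_t, with the opcode
 * in bits 0..7 and the operands packed above it.  An "rrr" insn such as
 * add_i32 decodes as
 *
 *   opc = extract32(insn,  0, 8);    bits  0..7   opcode
 *   r0  = extract32(insn,  8, 4);    bits  8..11  output register
 *   r1  = extract32(insn, 12, 4);    bits 12..15  first input
 *   r2  = extract32(insn, 16, 4);    bits 16..19  second input
 */
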
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
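
/*
 * Worked example for the comparators above: with u0 = 0xffffffff and
 * u1 = 1, TCG_COND_LTU yields false (u0 is the larger unsigned value),
 * while TCG_COND_LT yields true, because i0 reinterprets the same bits
 * as the signed value -1.
 */
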
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    uint64_t ret;

    set_helper_retaddr(ra);
    switch (mop) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);

    set_helper_retaddr(ra);
    switch (mop) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}
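
/*
 * Background note (based on QEMU's MemOpIdx definition, see
 * make_memop_idx()): oi packs the MemOp flags together with a softmmu
 * mmu index; get_memop() above recovers the MemOp, of which only the
 * size, sign and byte-swap bits select an access routine here.
 */
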
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
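
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *     case INDEX_op_add_i64: case INDEX_op_add_i32:
 * so a single interpreter arm below serves both operand widths, while
 * CASE_64(x) disappears entirely on 32-bit hosts.
 */
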
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TB's do not make any calls. In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            {
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

        /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

        /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
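
        /*
         * Worked example for the deposit/extract ops above:
         * deposit32(0xffffffff, 8, 8, 0) clears bits 8..15, giving
         * 0xffff00ff; extract32(0x12345678, 8, 8) yields 0x56; and
         * sextract32 additionally sign-extends, so a field whose top
         * bit is set comes back as a negative value.
         */
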
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
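
        /*
         * Example for mulu2 above: 0xffffffff * 0xffffffff =
         * 0xfffffffe00000001, so r0 receives the low word 0x00000001
         * and r1 the high word 0xfffffffe.
         */
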
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
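
        /*
         * Example for the bswap ops above: bswap16(0x1234) is 0x3412
         * and bswap32(0x12345678) is 0x78563412, reversing byte order
         * for the given width.
         */
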
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
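
        /*
         * Carry/borrow example for add2/sub2 above: adding 1 to the
         * 128-bit pair whose low word is 0xffffffffffffffff makes T1
         * wrap to 0, so (T1 < regs[r2]) is 1 and the carry propagates
         * into the high word T2.
         */
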
        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}
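
/*
 * Illustrative output of the disassembler below (the exact spacing
 * follows the "%-12s" format used throughout; the leading field is the
 * raw instruction word printed in hex):
 *
 *     <insn hex>  add_i32      r0, r1, r2
 *     <insn hex>  brcond_i32   r2, 0, ne, <target pointer>
 */
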
/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x  ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}