tcg/tci/tcg-target.inc.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-null.h"

/* TODO list:
 * - See TODO comments in code.
 */
/* Marker for missing code. */
#define TODO() \
    do { \
        fprintf(stderr, "TODO %s:%u: %s()\n", \
                __FILE__, __LINE__, __func__); \
        tcg_abort(); \
    } while (0)

/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
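/* For example, BITS(7, 4) == 0xf0 (bits 7..4 inclusive). */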

/* Macros used in tcg_target_op_defs. */
#define R       "r"
#define RI      "ri"
#if TCG_TARGET_REG_BITS == 32
# define R64    "r", "r"
#else
# define R64    "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
# define L      "L", "L"
# define S      "S", "S"
#else
# define L      "L"
# define S      "S"
#endif
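
/* When a 64 bit guest runs on a 32 bit host, a guest address occupies two
   host registers, so the L (qemu_ld) and S (qemu_st) constraints list two
   operands; likewise R64 names the two halves of a 64 bit value there. */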

/* TODO: documentation. */
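/* Each entry pairs a TCG opcode with constraint strings for its operands:
   "r" is any register, "ri" a register or immediate (see R and RI above). */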
static const TCGTargetOpDef tcg_target_op_defs[] = {
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_br, { NULL } },

    { INDEX_op_ld8u_i32, { R, R } },
    { INDEX_op_ld8s_i32, { R, R } },
    { INDEX_op_ld16u_i32, { R, R } },
    { INDEX_op_ld16s_i32, { R, R } },
    { INDEX_op_ld_i32, { R, R } },
    { INDEX_op_st8_i32, { R, R } },
    { INDEX_op_st16_i32, { R, R } },
    { INDEX_op_st_i32, { R, R } },

    { INDEX_op_add_i32, { R, RI, RI } },
    { INDEX_op_sub_i32, { R, RI, RI } },
    { INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { R, R, R } },
    { INDEX_op_divu_i32, { R, R, R } },
    { INDEX_op_rem_i32, { R, R, R } },
    { INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
       If both operands are constants, we can optimize. */
    { INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
    { INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
    { INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
    { INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
    { INDEX_op_nor_i32, { R, RI, RI } },
#endif
    { INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
    { INDEX_op_orc_i32, { R, RI, RI } },
#endif
    { INDEX_op_xor_i32, { R, RI, RI } },
    { INDEX_op_shl_i32, { R, RI, RI } },
    { INDEX_op_shr_i32, { R, RI, RI } },
    { INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
    { INDEX_op_rotl_i32, { R, RI, RI } },
    { INDEX_op_rotr_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i32
    { INDEX_op_deposit_i32, { R, "0", R } },
#endif

    { INDEX_op_brcond_i32, { R, RI } },

    { INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */

#if TCG_TARGET_REG_BITS == 32
    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
    { INDEX_op_mulu2_i32, { R, R, R, R } },
    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
#endif

#if TCG_TARGET_HAS_not_i32
    { INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
    { INDEX_op_neg_i32, { R, R } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_ld8u_i64, { R, R } },
    { INDEX_op_ld8s_i64, { R, R } },
    { INDEX_op_ld16u_i64, { R, R } },
    { INDEX_op_ld16s_i64, { R, R } },
    { INDEX_op_ld32u_i64, { R, R } },
    { INDEX_op_ld32s_i64, { R, R } },
    { INDEX_op_ld_i64, { R, R } },

    { INDEX_op_st8_i64, { R, R } },
    { INDEX_op_st16_i64, { R, R } },
    { INDEX_op_st32_i64, { R, R } },
    { INDEX_op_st_i64, { R, R } },

    { INDEX_op_add_i64, { R, RI, RI } },
    { INDEX_op_sub_i64, { R, RI, RI } },
    { INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
    { INDEX_op_div_i64, { R, R, R } },
    { INDEX_op_divu_i64, { R, R, R } },
    { INDEX_op_rem_i64, { R, R, R } },
    { INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
    { INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
    { INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
    { INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
    { INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
    { INDEX_op_nor_i64, { R, RI, RI } },
#endif
    { INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
    { INDEX_op_orc_i64, { R, RI, RI } },
#endif
    { INDEX_op_xor_i64, { R, RI, RI } },
    { INDEX_op_shl_i64, { R, RI, RI } },
    { INDEX_op_shr_i64, { R, RI, RI } },
    { INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
    { INDEX_op_rotl_i64, { R, RI, RI } },
    { INDEX_op_rotr_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i64
    { INDEX_op_deposit_i64, { R, "0", R } },
#endif
    { INDEX_op_brcond_i64, { R, RI } },

#if TCG_TARGET_HAS_ext8s_i64
    { INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
    { INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
    { INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
    { INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
    { INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
    { INDEX_op_ext32u_i64, { R, R } },
#endif
    { INDEX_op_ext_i32_i64, { R, R } },
    { INDEX_op_extu_i32_i64, { R, R } },
#if TCG_TARGET_HAS_bswap16_i64
    { INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
    { INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
    { INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
    { INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
    { INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

    { INDEX_op_qemu_ld_i32, { R, L } },
    { INDEX_op_qemu_ld_i64, { R64, L } },

    { INDEX_op_qemu_st_i32, { R, S } },
    { INDEX_op_qemu_st_i64, { R64, S } },

#if TCG_TARGET_HAS_ext8s_i32
    { INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
    { INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
    { INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
    { INDEX_op_ext16u_i32, { R, R } },
#endif

#if TCG_TARGET_HAS_bswap16_i32
    { INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
    { INDEX_op_bswap32_i32, { R, R } },
#endif

    { INDEX_op_mb, { } },
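
    /* Sentinel entry: terminates the list for tcg_add_target_add_op_defs(). */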
    { -1 },
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
#endif
};

#if MAX_OPC_PARAM_IARGS != 5
# error Fix needed, number of supported input arguments changed!
#endif

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
#if TCG_TARGET_REG_BITS == 32
    /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
#else
# error Too few input registers available
#endif
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
#if TCG_TARGET_NB_REGS >= 16
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
#if TCG_TARGET_NB_REGS >= 32
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
#endif
#endif
};
#endif

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* tcg_out_reloc always uses the same type, addend. */
    tcg_debug_assert(type == sizeof(tcg_target_long));
    tcg_debug_assert(addend == 0);
    tcg_debug_assert(value != 0);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_patch32(code_ptr, value);
    } else {
        tcg_patch64(code_ptr, value);
    }
}

/* Parse target specific constraints. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
    case 'L':                   /* qemu_ld constraint */
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter. */
void tci_disas(uint8_t opc)
{
    const TCGOpDef *def = &tcg_op_defs[opc];
    fprintf(stderr, "TCG %s %u, %u, %u\n",
            def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
}
#endif

/* Write value (native size). */
static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out32(s, v);
    } else {
        tcg_out64(s, v);
    }
}

/* Write opcode. */
static void tcg_out_op_t(TCGContext *s, TCGOpcode op)
{
    tcg_out8(s, op);
    tcg_out8(s, 0);
}
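
/* The zero byte written by tcg_out_op_t() reserves space for the
   instruction's total length; each emitter patches it afterwards with
   "old_code_ptr[1] = s->code_ptr - old_code_ptr;". */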

/* Write register. */
static void tcg_out_r(TCGContext *s, TCGArg t0)
{
    tcg_debug_assert(t0 < TCG_TARGET_NB_REGS);
    tcg_out8(s, t0);
}
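
/* Immediate operands are prefixed with a TCG_CONST marker byte so that the
   interpreter can distinguish them from register numbers. */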

/* Write register or constant (native size). */
static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out_i(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

/* Write register or constant (32 bit). */
static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out32(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

#if TCG_TARGET_REG_BITS == 64
/* Write register or constant (64 bit). */
static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out64(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}
#endif

/* Write label. */
static void tci_out_label(TCGContext *s, TCGLabel *label)
{
    if (label->has_value) {
        tcg_out_i(s, label->u.value);
        tcg_debug_assert(label->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), label, 0);
        s->code_ptr += sizeof(tcg_target_ulong);
    }
}
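
/* An unresolved label above emits a relocation and reserves space, which
   patch_reloc() fills in once the label's value is known. */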

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_ld_i32);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_ld_i64);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_debug_assert(arg2 == (int32_t)arg2);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    tcg_debug_assert(ret != arg);
#if TCG_TARGET_REG_BITS == 32
    tcg_out_op_t(s, INDEX_op_mov_i32);
#else
    tcg_out_op_t(s, INDEX_op_mov_i64);
#endif
    tcg_out_r(s, ret);
    tcg_out_r(s, arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg t0, tcg_target_long arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    uint32_t arg32 = arg;
    if (type == TCG_TYPE_I32 || arg == arg32) {
        tcg_out_op_t(s, INDEX_op_movi_i32);
        tcg_out_r(s, t0);
        tcg_out32(s, arg32);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_movi_i64);
        tcg_out_r(s, t0);
        tcg_out64(s, arg);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
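
/* Note that a 64 bit value which fits in 32 bits is emitted as movi_i32
   even for TCG_TYPE_I64, which keeps the bytecode shorter. */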

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    tcg_out_op_t(s, INDEX_op_call);
    tcg_out_ri(s, 1, (uintptr_t)arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;

    tcg_out_op_t(s, opc);

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump method. */
            tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
            /* Align for atomic patching and thread safety */
            s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
        break;
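        /* The 32 bit zero emitted above is a placeholder; when this TB is
           chained to its successor, it is patched with the actual jump
           target at the offset recorded in tb_jmp_insn_offset. */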
    case INDEX_op_br:
        tci_out_label(s, arg_label(args[0]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_debug_assert(args[2] == (int32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i32:  /* Optional (TCG_TARGET_HAS_deposit_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_debug_assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        tcg_debug_assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i64:  /* Optional (TCG_TARGET_HAS_deposit_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_debug_assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        tcg_debug_assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, arg_label(args[3]));
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);   /* condition */
        tci_out_label(s, arg_label(args[5]));
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, arg_label(args[3]));
        break;
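        /* For the qemu_ld/st cases below: the guest address takes one or two
           registers (two when TARGET_LONG_BITS > TCG_TARGET_REG_BITS), a
           64 bit value takes two registers on a 32 bit host, and the final
           tcg_out_i() presumably emits the memory operation index. */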
    case INDEX_op_qemu_ld_i32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_r(s, *args++);
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_r(s, *args++);
        }
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_r(s, *args++);
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_r(s, *args++);
        }
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_mb:
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_st_i32);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_st_i64);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    /* No need to return 0 or 1, 0 or != 0 is good enough. */
    return arg_ct->ct & TCG_CT_CONST;
}

static void tcg_target_init(TCGContext *s)
{
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
    const char *envval = getenv("DEBUG_TCG");
    if (envval) {
        qemu_set_log(strtol(envval, NULL, 0));
    }
#endif

    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* Registers available for 64 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* TODO: Which registers should be set here? */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_add_target_add_op_defs(tcg_target_op_defs);

    /* We use negative offsets from "sp" so that we can distinguish
       stores that might pretend to be call arguments. */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  -CPU_TEMP_BUF_NLONGS * sizeof(long),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)