tcg-ppc64: Rename tcg_out_calli to tcg_out_call
tcg/ppc64/tcg-target.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-ldst.h"

#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000

static tcg_insn_unit *tb_ret_addr;

#if TARGET_LONG_BITS == 32
#define LD_ADDR LWZ
#define CMP_L 0
#else
#define LD_ADDR LD
#define CMP_L (1<<21)
#endif

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif

#include "elf.h"
static bool have_isa_2_06;
#define HAVE_ISA_2_06  have_isa_2_06
#define HAVE_ISEL      have_isa_2_06
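
/* have_isa_2_06 is filled in from AT_HWCAP in tcg_target_init; the same
   flag gates both the byte-reversed doubleword memory ops (ldbrx/stdbrx)
   and the isel paths below.  */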

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG 30
#else
#define TCG_GUEST_BASE_REG 0
#endif

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r0",
    "r1",
    "r2",
    "r3",
    "r4",
    "r5",
    "r6",
    "r7",
    "r8",
    "r9",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R11,
    TCG_REG_R10,  /* call clobbered, arguments */
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3
};

static const int tcg_target_callee_save_regs[] = {
#ifdef __APPLE__
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,  /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};
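
/* The I-form branch instruction has a 24-bit LI field scaled by 4, so a
   direct branch can reach any target within a signed 26-bit byte
   displacement (+/- 32MB) of the branch itself.  */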
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(in_range_b(disp));
    return disp & 0x3fffffc;
}

static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target);
}

static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target);
}

static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0x3fffffc;
    tcg_out32(s, insn | retrans);
}

static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0xfffc;
    tcg_out32(s, insn | retrans);
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_insn_unit *target = (tcg_insn_unit *)value;

    assert(addend == 0);
    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, target);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, target);
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }
    return 0;
}

#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))

#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LDU    XO58(  1)
#define LWA    XO58(  2)
#define LWAX   XO31(341)

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR     XO31(444)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define TW     XO31( 4)
#define TRAP   (TW | TO(31))

#define RT(r)  ((r)<<21)
#define RS(r)  ((r)<<21)
#define RA(r)  ((r)<<16)
#define RB(r)  ((r)<<11)
#define TO(t)  ((t)<<21)
#define SH(s)  ((s)<<11)
#define MB(b)  ((b)<<6)
#define ME(e)  ((e)<<1)
#define BO(o)  ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b) (1 << (19 - (b)))

#define LK    1

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)    ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};
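
/* The comparison helpers below always compare into CR field 7, so these
   tables encode their BI/BA/BB operands relative to CR7.  */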
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out32(s, OR | SAB(arg, ret, arg));
    }
}

static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}

static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}

static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high = arg >> 32;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
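
/* A worked example for mask_operand below: c = 0x00000ff0 gives
   lsb = 0x10, and test = c + lsb = 0x1000 is a power of two, so the
   pattern is accepted; the results are mb = 20 and me = 27, i.e. mask
   bits 20..27 in rlwinm's big-endian bit numbering, which is exactly
   0x00000ff0 again.  */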
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}

static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}

static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
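
/* For the ELF ABI (the non-Apple case below), a function pointer does not
   point at code: it points at a descriptor of three doublewords -- entry
   address, TOC pointer, environment pointer.  A call must therefore load
   the real entry point and the callee's r2 through the descriptor.  */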
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
#ifdef __APPLE__
    tcg_out_b(s, LK, target);
#else
    /* Look through the descriptor.  If the branch is in range and the toc
       value fits in 32 bits, build the toc directly and branch there.  */
    void *tgt = ((void **)target)[0];
    uintptr_t toc = ((uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, toc);
        tcg_out_b(s, LK, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, arg);
        tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R2, ofs));
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, LD | TAI(TCG_REG_R2, TCG_REG_R2, ofs + 8));
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#endif
}
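
/* Emit a memory operation with an arbitrary signed 32-bit displacement:
   the D-form opcode opi is used when the offset fits and satisfies the
   low-bit alignment that the DS-form ops (ld/lwa/std) require; otherwise
   the offset is built in R2 and the X-form opcode opx is used.  */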
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    TCGReg rs = TCG_REG_R2;

    assert(rt != TCG_REG_R2 && base != TCG_REG_R2);

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt != TCG_REG_R0) {
            rs = rt;
        }
        break;
    case STD:
        align = 3;
        break;
    case STB: case STH: case STW:
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, orig);
        tcg_out32(s, opx | TAB(rt, base, TCG_REG_R2));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}

static const uint32_t qemu_ldx_opc[16] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_Q]  = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_Q]  = LDBRX,
};

static const uint32_t qemu_stx_opc[16] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_Q]  = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_Q]  = STDBRX,
};

static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};

#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, TCGReg addr_reg,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    TCGReg base = TCG_AREG0;

    /* Extract the page index, shifted into place for tlb index.  */
    if (TARGET_LONG_BITS == 32) {
        /* Zero-extend the address into a place helpful for further use.  */
        tcg_out_ext32u(s, TCG_REG_R4, addr_reg);
        addr_reg = TCG_REG_R4;
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R3, addr_reg,
                    64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
    }

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out32(s, ADDI | TAI(TCG_REG_R2, base, 0x7ff0));
        base = TCG_REG_R2;
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Extraction and shifting, part 2.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R3, addr_reg,
                    32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                    31 - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
    }

    tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));

    /* Load the tlb comparator.  */
    tcg_out32(s, LD_ADDR | TAI(TCG_REG_R2, TCG_REG_R3, cmp_off));

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out32(s, LD | TAI(TCG_REG_R3, TCG_REG_R3, add_off));

    /* Clear the non-page, non-alignment bits from the address.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr_reg, 0,
                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else if (!s_bits) {
        tcg_out_rld(s, RLDICR, TCG_REG_R0, addr_reg, 0, 63 - TARGET_PAGE_BITS);
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R0, addr_reg,
                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
    }

    tcg_out32(s, CMP | BF(7) | RA(TCG_REG_R0) | RB(TCG_REG_R2) | CMP_L);

    return addr_reg;
}

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                int data_reg, int addr_reg, int mem_index,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;

    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R6) | LR);

    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);

    if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lb->datalo_reg) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;
    TCGMemOp s_bits = opc & MO_SIZE;

    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_rld(s, RLDICL, TCG_REG_R5, lb->datalo_reg,
                0, 64 - (1 << (3 + s_bits)));
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R6, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R7) | LR);

    tcg_out_call(s, qemu_st_helpers[opc]);

    tcg_out_b(s, 0, lb->raddr);
}
#endif /* SOFTMMU */

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
    TCGReg rbase;
    uint32_t insn;
    TCGMemOp s_bits = opc & MO_SIZE;
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
#endif

#ifdef CONFIG_SOFTMMU
    addr_reg = tcg_out_tlb_read(s, s_bits, addr_reg, mem_index, true);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
        addr_reg = TCG_REG_R2;
    }
#endif

    insn = qemu_ldx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == LDBRX) {
        tcg_out32(s, ADDI | TAI(TCG_REG_R0, addr_reg, 4));
        tcg_out32(s, LWBRX | TAB(data_reg, rbase, addr_reg));
        tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
        tcg_out_rld(s, RLDIMI, data_reg, TCG_REG_R0, 32, 0);
    } else if (insn) {
        tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
    } else {
        insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
        tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
        insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(data_reg) | RS(data_reg));
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#endif
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
    TCGReg rbase;
    uint32_t insn;
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
#endif

#ifdef CONFIG_SOFTMMU
    addr_reg = tcg_out_tlb_read(s, opc & MO_SIZE, addr_reg, mem_index, false);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
        addr_reg = TCG_REG_R2;
    }
#endif

    insn = qemu_stx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == STDBRX) {
        tcg_out32(s, STWBRX | SAB(data_reg, rbase, addr_reg));
        tcg_out32(s, ADDI | TAI(TCG_REG_R2, addr_reg, 4));
        tcg_out_shri64(s, TCG_REG_R0, data_reg, 32);
        tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_R2));
    } else {
        tcg_out32(s, insn | SAB(data_reg, rbase, addr_reg));
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#endif
}
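
/* The first 48 bytes below (back chain through TOC save area) are the
   fixed stack frame header required by the 64-bit ELF ABI; the rest of
   the frame holds outgoing call arguments, TCG spill temps, and the
   callee-saved register save area, rounded up to a 16-byte multiple.  */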
#define FRAME_SIZE ((int) \
    ((8                     /* back chain */ \
      + 8                   /* CR */ \
      + 8                   /* LR */ \
      + 8                   /* compiler doubleword */ \
      + 8                   /* link editor doubleword */ \
      + 8                   /* TOC save area */ \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + ARRAY_SIZE(tcg_target_callee_save_regs) * 8 \
      + 15) & ~15))

#define REG_SAVE_BOT (FRAME_SIZE - ARRAY_SIZE(tcg_target_callee_save_regs) * 8)
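
/* Since the prologue itself is entered through an ordinary ELF function
   pointer, it first emits a function descriptor for itself; the +24 in
   the entry-point doubleword skips the three descriptor doublewords.  */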
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  REG_SAVE_BOT - CPU_TEMP_BUF_NLONGS * sizeof(long),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef __APPLE__
    /* First emit adhoc function descriptor */
    tcg_out64(s, (uint64_t)s->code_ptr + 24); /* entry point */
    tcg_out64(s, 0);                          /* toc */
    tcg_out64(s, 0);                          /* environment pointer */
#endif

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, STDU | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out32(s, STD | SAI(tcg_target_callee_save_regs[i], 1,
                               REG_SAVE_BOT + i * 8));
    }
    tcg_out32(s, STD | SAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tb_ret_addr = s->code_ptr;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out32(s, LD | TAI(tcg_target_callee_save_regs[i], TCG_REG_R1,
                              REG_SAVE_BOT + i * 8));
    }
    tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16));
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    if (type == TCG_TYPE_I32) {
        opi = LWZ, opx = LWZX;
    } else {
        opi = LD, opx = LDX;
    }
    tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    if (type == TCG_TYPE_I32) {
        opi = STW, opx = STWX;
    } else {
        opi = STD, opx = STDX;
    }
    tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
}

static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
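
/* cntlz[wd] produces the full operand width (32 or 64) only when the
   source is zero; shifting the count right by log2(width) therefore
   leaves exactly (src == 0).  */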
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    tcg_out32(s, (type == TCG_TYPE_I64 ? CNTLZD : CNTLZW) | RS(src) | RA(dst));
    tcg_out_shri64(s, dst, dst, type == TCG_TYPE_I64 ? 6 : 5);
}

static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}

static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}

static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    /* Ignore high bits of a potential constant arg2.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            tcg_out_rld(s, RLDICL, arg0, arg1,
                        type == TCG_TYPE_I64 ? 1 : 33, 63);
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (HAVE_ISEL) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_bc(TCGContext *s, int bc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
        tcg_out_bc_noaddr(s, bc);
    }
}

static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], label_index);
}

static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (HAVE_ISEL) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}

void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    TCGContext s;

    s.code_buf = s.code_ptr = (tcg_insn_unit *)jmp_addr;
    tcg_out_b(&s, 0, (tcg_insn_unit *)addr);
    flush_icache_range(jmp_addr, jmp_addr + tcg_current_code_size(&s));
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method.  */
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            s->code_ptr += 7;
        } else {
            /* Indirect jump method.  */
            tcg_abort();
        }
        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = &s->labels[args[0]];

            if (l->has_value) {
                tcg_out_b(s, 0, l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0);
                tcg_out_b_noaddr(s, B);
            }
        }
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)(uintptr_t)args[0]);
        } else {
#ifdef __APPLE__
            tcg_out32(s, MTSPR | RS(args[0]) | LR);
            tcg_out32(s, BCLR | BO_ALWAYS | LK);
#else
            tcg_out32(s, LD | TAI(TCG_REG_R0, args[0], 0));
            tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
            tcg_out32(s, LD | TAI(TCG_REG_R2, args[0], 8));
            tcg_out32(s, BCCTR | BO_ALWAYS | LK);
#endif
        }
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I32);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I64);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            tcg_out_shli64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            tcg_out_shri64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;

    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        if (a0 == 0) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_add2_i64:
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_sub2_i64:
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[4] && a0 == args[4])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[3], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[3], args[2]));
        }
        if (const_args[4]) {
            tcg_out32(s, (args[4] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[4]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    default:
        tcg_dump_ops(s);
        tcg_abort();
    }
}

static const TCGTargetOpDef ppc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_movi_i64, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_brcond_i64, { "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_st_i64, { "S", "S" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },

    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },
    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } },
    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
    { INDEX_op_muluh_i64, { "r", "r", "r" } },

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa_2_06 = true;
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);  /* mem temp */
#ifdef __APPLE__
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R11); /* ??? */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */

    tcg_add_target_add_op_defs(ppc_op_defs);
}

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE EM_PPC64

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x78,             /* sleb128 -8 */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, 1,                          /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x11, 65, 0x7e,                 /* DW_CFA_offset_extended_sf, lr, 16 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * 8)) / 8;
    }

    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}