tcg/ppc: Add support for load/store/logic/comparison
tcg/ppc/tcg-target.inc.c (qemu/kevin.git, blob 1a8d7dc925038846eb17d540aa132e9d31fb7187)
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "elf.h"
26 #include "tcg-pool.inc.c"
28 #if defined _CALL_DARWIN || defined __APPLE__
29 #define TCG_TARGET_CALL_DARWIN
30 #endif
31 #ifdef _CALL_SYSV
32 # define TCG_TARGET_CALL_ALIGN_ARGS 1
33 #endif
35 /* For some memory operations, we need a scratch that isn't R0. For the AIX
36 calling convention, we can re-use the TOC register since we'll be reloading
37 it at every call. Otherwise R12 will do nicely as neither a call-saved
38 register nor a parameter register. */
39 #ifdef _CALL_AIX
40 # define TCG_REG_TMP1 TCG_REG_R2
41 #else
42 # define TCG_REG_TMP1 TCG_REG_R12
43 #endif
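/* Background note (not part of the original comment): in addi/addis and in
   the D-form and X-form memory instructions, an RA field of 0 means the
   literal value 0 rather than the contents of r0, which is why r0 cannot
   serve as a base or general scratch register here. */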
45 #define TCG_VEC_TMP1 TCG_REG_V0
46 #define TCG_VEC_TMP2 TCG_REG_V1
48 #define TCG_REG_TB TCG_REG_R31
49 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
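/* When USE_REG_TB, R31 holds the start address of the current translation
   block, so tcg_out_movi_int and tcg_out_dupi_vec below can reach nearby
   values and the constant pool with a single TB-relative ADDI or LD. */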
51 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
52 #define SZP ((int)sizeof(void *))
54 /* Shorthand for size of a register. */
55 #define SZR (TCG_TARGET_REG_BITS / 8)
57 #define TCG_CT_CONST_S16 0x100
58 #define TCG_CT_CONST_U16 0x200
59 #define TCG_CT_CONST_S32 0x400
60 #define TCG_CT_CONST_U32 0x800
61 #define TCG_CT_CONST_ZERO 0x1000
62 #define TCG_CT_CONST_MONE 0x2000
63 #define TCG_CT_CONST_WSZ 0x4000
65 static tcg_insn_unit *tb_ret_addr;
67 TCGPowerISA have_isa;
68 static bool have_isel;
69 bool have_altivec;
71 #ifndef CONFIG_SOFTMMU
72 #define TCG_GUEST_BASE_REG 30
73 #endif
75 #ifdef CONFIG_DEBUG_TCG
76 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
77 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
78 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
79 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
80 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
81 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
82 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
83 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
84 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
86 #endif
88 static const int tcg_target_reg_alloc_order[] = {
89 TCG_REG_R14, /* call saved registers */
90 TCG_REG_R15,
91 TCG_REG_R16,
92 TCG_REG_R17,
93 TCG_REG_R18,
94 TCG_REG_R19,
95 TCG_REG_R20,
96 TCG_REG_R21,
97 TCG_REG_R22,
98 TCG_REG_R23,
99 TCG_REG_R24,
100 TCG_REG_R25,
101 TCG_REG_R26,
102 TCG_REG_R27,
103 TCG_REG_R28,
104 TCG_REG_R29,
105 TCG_REG_R30,
106 TCG_REG_R31,
107 TCG_REG_R12, /* call clobbered, non-arguments */
108 TCG_REG_R11,
109 TCG_REG_R2,
110 TCG_REG_R13,
111 TCG_REG_R10, /* call clobbered, arguments */
112 TCG_REG_R9,
113 TCG_REG_R8,
114 TCG_REG_R7,
115 TCG_REG_R6,
116 TCG_REG_R5,
117 TCG_REG_R4,
118 TCG_REG_R3,
120 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
121 TCG_REG_V2, /* call clobbered, vectors */
122 TCG_REG_V3,
123 TCG_REG_V4,
124 TCG_REG_V5,
125 TCG_REG_V6,
126 TCG_REG_V7,
127 TCG_REG_V8,
128 TCG_REG_V9,
129 TCG_REG_V10,
130 TCG_REG_V11,
131 TCG_REG_V12,
132 TCG_REG_V13,
133 TCG_REG_V14,
134 TCG_REG_V15,
135 TCG_REG_V16,
136 TCG_REG_V17,
137 TCG_REG_V18,
138 TCG_REG_V19,
141 static const int tcg_target_call_iarg_regs[] = {
142 TCG_REG_R3,
143 TCG_REG_R4,
144 TCG_REG_R5,
145 TCG_REG_R6,
146 TCG_REG_R7,
147 TCG_REG_R8,
148 TCG_REG_R9,
149 TCG_REG_R10
152 static const int tcg_target_call_oarg_regs[] = {
153 TCG_REG_R3,
154 TCG_REG_R4
157 static const int tcg_target_callee_save_regs[] = {
158 #ifdef TCG_TARGET_CALL_DARWIN
159 TCG_REG_R11,
160 #endif
161 TCG_REG_R14,
162 TCG_REG_R15,
163 TCG_REG_R16,
164 TCG_REG_R17,
165 TCG_REG_R18,
166 TCG_REG_R19,
167 TCG_REG_R20,
168 TCG_REG_R21,
169 TCG_REG_R22,
170 TCG_REG_R23,
171 TCG_REG_R24,
172 TCG_REG_R25,
173 TCG_REG_R26,
174 TCG_REG_R27, /* currently used for the global env */
175 TCG_REG_R28,
176 TCG_REG_R29,
177 TCG_REG_R30,
178 TCG_REG_R31
181 static inline bool in_range_b(tcg_target_long target)
183 return target == sextract64(target, 0, 26);
186 static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
188 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
189 tcg_debug_assert(in_range_b(disp));
190 return disp & 0x3fffffc;
193 static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
195 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
196 if (in_range_b(disp)) {
197 *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
198 return true;
200 return false;
203 static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
205 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
206 tcg_debug_assert(disp == (int16_t) disp);
207 return disp & 0xfffc;
210 static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
212 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
213 if (disp == (int16_t) disp) {
214 *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
215 return true;
217 return false;
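/* The unconditional B form carries a 24-bit signed word displacement in the
   instruction (mask 0x3fffffc, reach +/- 32 MB, cf. in_range_b); the
   conditional BC form carries a 14-bit displacement (mask 0xfffc, reach
   +/- 32 KB).  reloc_pc24/reloc_pc14 patch those fields in place. */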
220 /* parse target specific constraints */
221 static const char *target_parse_constraint(TCGArgConstraint *ct,
222 const char *ct_str, TCGType type)
224 switch (*ct_str++) {
225 case 'A': case 'B': case 'C': case 'D':
226 ct->ct |= TCG_CT_REG;
227 tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
228 break;
229 case 'r':
230 ct->ct |= TCG_CT_REG;
231 ct->u.regs = 0xffffffff;
232 break;
233 case 'v':
234 ct->ct |= TCG_CT_REG;
235 ct->u.regs = 0xffffffff00000000ull;
236 break;
237 case 'L': /* qemu_ld constraint */
238 ct->ct |= TCG_CT_REG;
239 ct->u.regs = 0xffffffff;
240 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
241 #ifdef CONFIG_SOFTMMU
242 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
243 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
244 #endif
245 break;
246 case 'S': /* qemu_st constraint */
247 ct->ct |= TCG_CT_REG;
248 ct->u.regs = 0xffffffff;
249 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
250 #ifdef CONFIG_SOFTMMU
251 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
252 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
253 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
254 #endif
255 break;
256 case 'I':
257 ct->ct |= TCG_CT_CONST_S16;
258 break;
259 case 'J':
260 ct->ct |= TCG_CT_CONST_U16;
261 break;
262 case 'M':
263 ct->ct |= TCG_CT_CONST_MONE;
264 break;
265 case 'T':
266 ct->ct |= TCG_CT_CONST_S32;
267 break;
268 case 'U':
269 ct->ct |= TCG_CT_CONST_U32;
270 break;
271 case 'W':
272 ct->ct |= TCG_CT_CONST_WSZ;
273 break;
274 case 'Z':
275 ct->ct |= TCG_CT_CONST_ZERO;
276 break;
277 default:
278 return NULL;
280 return ct_str;
283 /* test if a constant matches the constraint */
284 static int tcg_target_const_match(tcg_target_long val, TCGType type,
285 const TCGArgConstraint *arg_ct)
287 int ct = arg_ct->ct;
288 if (ct & TCG_CT_CONST) {
289 return 1;
292 /* The only 32-bit constraint we use aside from
293 TCG_CT_CONST is TCG_CT_CONST_S16. */
294 if (type == TCG_TYPE_I32) {
295 val = (int32_t)val;
298 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
299 return 1;
300 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
301 return 1;
302 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
303 return 1;
304 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
305 return 1;
306 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
307 return 1;
308 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
309 return 1;
310 } else if ((ct & TCG_CT_CONST_WSZ)
311 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
312 return 1;
314 return 0;
317 #define OPCD(opc) ((opc)<<26)
318 #define XO19(opc) (OPCD(19)|((opc)<<1))
319 #define MD30(opc) (OPCD(30)|((opc)<<2))
320 #define MDS30(opc) (OPCD(30)|((opc)<<1))
321 #define XO31(opc) (OPCD(31)|((opc)<<1))
322 #define XO58(opc) (OPCD(58)|(opc))
323 #define XO62(opc) (OPCD(62)|(opc))
324 #define VX4(opc) (OPCD(4)|(opc))
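/* An instruction is assembled by OR-ing fields into a single 32-bit word:
   OPCD places the 6-bit primary opcode in the top six bits, and the helpers
   above add the extended opcode in the position used by each opcode group
   (19, 30, 31, 58, 62, and the VX group under primary opcode 4). */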
326 #define B OPCD( 18)
327 #define BC OPCD( 16)
328 #define LBZ OPCD( 34)
329 #define LHZ OPCD( 40)
330 #define LHA OPCD( 42)
331 #define LWZ OPCD( 32)
332 #define LWZUX XO31( 55)
333 #define STB OPCD( 38)
334 #define STH OPCD( 44)
335 #define STW OPCD( 36)
337 #define STD XO62( 0)
338 #define STDU XO62( 1)
339 #define STDX XO31(149)
341 #define LD XO58( 0)
342 #define LDX XO31( 21)
343 #define LDU XO58( 1)
344 #define LDUX XO31( 53)
345 #define LWA XO58( 2)
346 #define LWAX XO31(341)
348 #define ADDIC OPCD( 12)
349 #define ADDI OPCD( 14)
350 #define ADDIS OPCD( 15)
351 #define ORI OPCD( 24)
352 #define ORIS OPCD( 25)
353 #define XORI OPCD( 26)
354 #define XORIS OPCD( 27)
355 #define ANDI OPCD( 28)
356 #define ANDIS OPCD( 29)
357 #define MULLI OPCD( 7)
358 #define CMPLI OPCD( 10)
359 #define CMPI OPCD( 11)
360 #define SUBFIC OPCD( 8)
362 #define LWZU OPCD( 33)
363 #define STWU OPCD( 37)
365 #define RLWIMI OPCD( 20)
366 #define RLWINM OPCD( 21)
367 #define RLWNM OPCD( 23)
369 #define RLDICL MD30( 0)
370 #define RLDICR MD30( 1)
371 #define RLDIMI MD30( 3)
372 #define RLDCL MDS30( 8)
374 #define BCLR XO19( 16)
375 #define BCCTR XO19(528)
376 #define CRAND XO19(257)
377 #define CRANDC XO19(129)
378 #define CRNAND XO19(225)
379 #define CROR XO19(449)
380 #define CRNOR XO19( 33)
382 #define EXTSB XO31(954)
383 #define EXTSH XO31(922)
384 #define EXTSW XO31(986)
385 #define ADD XO31(266)
386 #define ADDE XO31(138)
387 #define ADDME XO31(234)
388 #define ADDZE XO31(202)
389 #define ADDC XO31( 10)
390 #define AND XO31( 28)
391 #define SUBF XO31( 40)
392 #define SUBFC XO31( 8)
393 #define SUBFE XO31(136)
394 #define SUBFME XO31(232)
395 #define SUBFZE XO31(200)
396 #define OR XO31(444)
397 #define XOR XO31(316)
398 #define MULLW XO31(235)
399 #define MULHW XO31( 75)
400 #define MULHWU XO31( 11)
401 #define DIVW XO31(491)
402 #define DIVWU XO31(459)
403 #define CMP XO31( 0)
404 #define CMPL XO31( 32)
405 #define LHBRX XO31(790)
406 #define LWBRX XO31(534)
407 #define LDBRX XO31(532)
408 #define STHBRX XO31(918)
409 #define STWBRX XO31(662)
410 #define STDBRX XO31(660)
411 #define MFSPR XO31(339)
412 #define MTSPR XO31(467)
413 #define SRAWI XO31(824)
414 #define NEG XO31(104)
415 #define MFCR XO31( 19)
416 #define MFOCRF (MFCR | (1u << 20))
417 #define NOR XO31(124)
418 #define CNTLZW XO31( 26)
419 #define CNTLZD XO31( 58)
420 #define CNTTZW XO31(538)
421 #define CNTTZD XO31(570)
422 #define CNTPOPW XO31(378)
423 #define CNTPOPD XO31(506)
424 #define ANDC XO31( 60)
425 #define ORC XO31(412)
426 #define EQV XO31(284)
427 #define NAND XO31(476)
428 #define ISEL XO31( 15)
430 #define MULLD XO31(233)
431 #define MULHD XO31( 73)
432 #define MULHDU XO31( 9)
433 #define DIVD XO31(489)
434 #define DIVDU XO31(457)
436 #define LBZX XO31( 87)
437 #define LHZX XO31(279)
438 #define LHAX XO31(343)
439 #define LWZX XO31( 23)
440 #define STBX XO31(215)
441 #define STHX XO31(407)
442 #define STWX XO31(151)
444 #define EIEIO XO31(854)
445 #define HWSYNC XO31(598)
446 #define LWSYNC (HWSYNC | (1u << 21))
448 #define SPR(a, b) ((((a)<<5)|(b))<<11)
449 #define LR SPR(8, 0)
450 #define CTR SPR(9, 0)
452 #define SLW XO31( 24)
453 #define SRW XO31(536)
454 #define SRAW XO31(792)
456 #define SLD XO31( 27)
457 #define SRD XO31(539)
458 #define SRAD XO31(794)
459 #define SRADI XO31(413<<1)
461 #define TW XO31( 4)
462 #define TRAP (TW | TO(31))
464 #define NOP ORI /* ori 0,0,0 */
466 #define LVX XO31(103)
467 #define LVEBX XO31(7)
468 #define LVEHX XO31(39)
469 #define LVEWX XO31(71)
471 #define STVX XO31(231)
472 #define STVEWX XO31(199)
474 #define VCMPEQUB VX4(6)
475 #define VCMPEQUH VX4(70)
476 #define VCMPEQUW VX4(134)
477 #define VCMPGTSB VX4(774)
478 #define VCMPGTSH VX4(838)
479 #define VCMPGTSW VX4(902)
480 #define VCMPGTUB VX4(518)
481 #define VCMPGTUH VX4(582)
482 #define VCMPGTUW VX4(646)
484 #define VAND VX4(1028)
485 #define VANDC VX4(1092)
486 #define VNOR VX4(1284)
487 #define VOR VX4(1156)
488 #define VXOR VX4(1220)
490 #define VSPLTB VX4(524)
491 #define VSPLTH VX4(588)
492 #define VSPLTW VX4(652)
493 #define VSPLTISB VX4(780)
494 #define VSPLTISH VX4(844)
495 #define VSPLTISW VX4(908)
497 #define VSLDOI VX4(44)
499 #define RT(r) ((r)<<21)
500 #define RS(r) ((r)<<21)
501 #define RA(r) ((r)<<16)
502 #define RB(r) ((r)<<11)
503 #define TO(t) ((t)<<21)
504 #define SH(s) ((s)<<11)
505 #define MB(b) ((b)<<6)
506 #define ME(e) ((e)<<1)
507 #define BO(o) ((o)<<21)
508 #define MB64(b) ((b)<<5)
509 #define FXM(b) (1 << (19 - (b)))
511 #define VRT(r) (((r) & 31) << 21)
512 #define VRA(r) (((r) & 31) << 16)
513 #define VRB(r) (((r) & 31) << 11)
514 #define VRC(r) (((r) & 31) << 6)
516 #define LK 1
518 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
519 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
520 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
521 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
523 #define BF(n) ((n)<<23)
524 #define BI(n, c) (((c)+((n)*4))<<16)
525 #define BT(n, c) (((c)+((n)*4))<<21)
526 #define BA(n, c) (((c)+((n)*4))<<16)
527 #define BB(n, c) (((c)+((n)*4))<<11)
528 #define BC_(n, c) (((c)+((n)*4))<<6)
530 #define BO_COND_TRUE BO(12)
531 #define BO_COND_FALSE BO( 4)
532 #define BO_ALWAYS BO(20)
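/* BO field values: 12 = branch if the selected CR bit is set, 4 = branch if
   it is clear, 20 = branch always; the CTR-decrementing BO variants are not
   used by this backend. */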
534 enum {
535 CR_LT,
536 CR_GT,
537 CR_EQ,
538 CR_SO
541 static const uint32_t tcg_to_bc[] = {
542 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
543 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
544 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
545 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
546 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
547 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
548 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
549 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
550 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
551 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
554 /* The low bit here is set if the RA and RB fields must be inverted. */
555 static const uint32_t tcg_to_isel[] = {
556 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
557 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
558 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
559 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
560 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
561 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
562 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
563 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
564 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
565 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
568 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
569 intptr_t value, intptr_t addend)
571 tcg_insn_unit *target;
572 int16_t lo;
573 int32_t hi;
575 value += addend;
576 target = (tcg_insn_unit *)value;
578 switch (type) {
579 case R_PPC_REL14:
580 return reloc_pc14(code_ptr, target);
581 case R_PPC_REL24:
582 return reloc_pc24(code_ptr, target);
583 case R_PPC_ADDR16:
585 * We are (slightly) abusing this relocation type. In particular,
586 * assert that the low 2 bits are zero, and do not modify them.
587 * That way we can use this with LD et al that have opcode bits
588 * in the low 2 bits of the insn.
590 if ((value & 3) || value != (int16_t)value) {
591 return false;
593 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
594 break;
595 case R_PPC_ADDR32:
597 * We are abusing this relocation type. Again, this points to
598 * a pair of insns, lis + load. This is an absolute address
599 * relocation for PPC32 so the lis cannot be removed.
601 lo = value;
602 hi = value - lo;
603 if (hi + lo != value) {
604 return false;
606 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
607 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
608 break;
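        /* Example values for the split above: 0x12348000 gives
           lo = -0x8000 and hi = 0x12350000, so the patched pair becomes
           lis rX, 0x1235 followed by a displacement of -0x8000, and
           0x12350000 - 0x8000 restores the original address. */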
609 default:
610 g_assert_not_reached();
612 return true;
615 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
616 TCGReg base, tcg_target_long offset);
618 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
620 if (ret == arg) {
621 return true;
623 switch (type) {
624 case TCG_TYPE_I64:
625 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
626 /* fallthru */
627 case TCG_TYPE_I32:
628 if (ret < TCG_REG_V0 && arg < TCG_REG_V0) {
629 tcg_out32(s, OR | SAB(arg, ret, arg));
630 break;
631 } else if (ret < TCG_REG_V0 || arg < TCG_REG_V0) {
632 /* Altivec does not support vector/integer moves. */
633 return false;
635 /* fallthru */
636 case TCG_TYPE_V64:
637 case TCG_TYPE_V128:
638 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
639 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
640 break;
641 default:
642 g_assert_not_reached();
644 return true;
647 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
648 int sh, int mb)
650 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
651 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
652 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
653 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
656 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
657 int sh, int mb, int me)
659 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
662 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
664 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
667 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
669 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
672 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
674 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
677 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
679 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
682 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
684 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
687 /* Emit a move into ret of arg, if it can be done in one insn. */
688 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
690 if (arg == (int16_t)arg) {
691 tcg_out32(s, ADDI | TAI(ret, 0, arg));
692 return true;
694 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
695 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
696 return true;
698 return false;
701 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
702 tcg_target_long arg, bool in_prologue)
704 intptr_t tb_diff;
705 tcg_target_long tmp;
706 int shift;
708 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
710 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
711 arg = (int32_t)arg;
714 /* Load 16-bit immediates with one insn. */
715 if (tcg_out_movi_one(s, ret, arg)) {
716 return;
719 /* Load addresses within the TB with one insn. */
720 tb_diff = arg - (intptr_t)s->code_gen_ptr;
721 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
722 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
723 return;
726 /* Load 32-bit immediates with two insns. Note that we've already
727 eliminated bare ADDIS, so we know both insns are required. */
728 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
729 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
730 tcg_out32(s, ORI | SAI(ret, ret, arg));
731 return;
733 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
734 tcg_out32(s, ADDI | TAI(ret, 0, arg));
735 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
736 return;
739 /* Load masked 16-bit value. */
740 if (arg > 0 && (arg & 0x8000)) {
741 tmp = arg | 0x7fff;
742 if ((tmp & (tmp + 1)) == 0) {
743 int mb = clz64(tmp + 1) + 1;
744 tcg_out32(s, ADDI | TAI(ret, 0, arg));
745 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
746 return;
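    /* Illustrative value for the masked case above: arg = 0xfffff8000 gives
       tmp = 0xfffffffff and tmp + 1 = 1 << 36, so mb = 28; ADDI materializes
       0xffffffffffff8000 and RLDICL ...,0,28 keeps the low 36 bits, yielding
       0xfffff8000 again. */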
750 /* Load common masks with 2 insns. */
751 shift = ctz64(arg);
752 tmp = arg >> shift;
753 if (tmp == (int16_t)tmp) {
754 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
755 tcg_out_shli64(s, ret, ret, shift);
756 return;
758 shift = clz64(arg);
759 if (tcg_out_movi_one(s, ret, arg << shift)) {
760 tcg_out_shri64(s, ret, ret, shift);
761 return;
764 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
765 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
766 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
767 return;
770 /* Use the constant pool, if possible. */
771 if (!in_prologue && USE_REG_TB) {
772 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
773 -(intptr_t)s->code_gen_ptr);
774 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
775 return;
778 tmp = arg >> 31 >> 1;
779 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
780 if (tmp) {
781 tcg_out_shli64(s, ret, ret, 32);
783 if (arg & 0xffff0000) {
784 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
786 if (arg & 0xffff) {
787 tcg_out32(s, ORI | SAI(ret, ret, arg));
791 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
792 tcg_target_long val)
794 uint32_t load_insn;
795 int rel, low;
796 intptr_t add;
798 low = (int8_t)val;
799 if (low >= -16 && low < 16) {
800 if (val == (tcg_target_long)dup_const(MO_8, low)) {
801 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
802 return;
804 if (val == (tcg_target_long)dup_const(MO_16, low)) {
805 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
806 return;
808 if (val == (tcg_target_long)dup_const(MO_32, low)) {
809 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
810 return;
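    /* vspltisb/vspltish/vspltisw take a 5-bit signed immediate (hence the
       -16..15 test above) and splat it across every byte/halfword/word lane
       of the target vector register. */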
815 * Otherwise we must load the value from the constant pool.
817 if (USE_REG_TB) {
818 rel = R_PPC_ADDR16;
819 add = -(intptr_t)s->code_gen_ptr;
820 } else {
821 rel = R_PPC_ADDR32;
822 add = 0;
825 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
826 if (TCG_TARGET_REG_BITS == 64) {
827 new_pool_l2(s, rel, s->code_ptr, add, val, val);
828 } else {
829 new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
832 if (USE_REG_TB) {
833 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
834 load_insn |= RA(TCG_REG_TB);
835 } else {
836 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
837 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
839 tcg_out32(s, load_insn);
842 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
843 tcg_target_long arg)
845 switch (type) {
846 case TCG_TYPE_I32:
847 case TCG_TYPE_I64:
848 tcg_debug_assert(ret < TCG_REG_V0);
849 tcg_out_movi_int(s, type, ret, arg, false);
850 break;
852 case TCG_TYPE_V64:
853 case TCG_TYPE_V128:
854 tcg_debug_assert(ret >= TCG_REG_V0);
855 tcg_out_dupi_vec(s, type, ret, arg);
856 break;
858 default:
859 g_assert_not_reached();
863 static bool mask_operand(uint32_t c, int *mb, int *me)
865 uint32_t lsb, test;
867 /* Accept a bit pattern like:
868 0....01....1
869 1....10....0
870 0..01..10..0
871 Keep track of the transitions. */
872 if (c == 0 || c == -1) {
873 return false;
875 test = c;
876 lsb = test & -test;
877 test += lsb;
878 if (test & (test - 1)) {
879 return false;
882 *me = clz32(lsb);
883 *mb = test ? clz32(test & -test) + 1 : 0;
884 return true;
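/* Worked example (values chosen for illustration): c = 0x0ff0 gives
   lsb = 0x10 and test = 0x1000, a single bit, so the pattern is accepted
   with me = clz32(0x10) = 27 and mb = clz32(0x1000) + 1 = 20; the rlwinm
   mask MB=20..ME=27 selects exactly bits 4..11 of the value, i.e. 0x0ff0. */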
887 static bool mask64_operand(uint64_t c, int *mb, int *me)
889 uint64_t lsb;
891 if (c == 0) {
892 return false;
895 lsb = c & -c;
896 /* Accept 1..10..0. */
897 if (c == -lsb) {
898 *mb = 0;
899 *me = clz64(lsb);
900 return true;
902 /* Accept 0..01..1. */
903 if (lsb == 1 && (c & (c + 1)) == 0) {
904 *mb = clz64(c + 1) + 1;
905 *me = 63;
906 return true;
908 return false;
911 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
913 int mb, me;
915 if (mask_operand(c, &mb, &me)) {
916 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
917 } else if ((c & 0xffff) == c) {
918 tcg_out32(s, ANDI | SAI(src, dst, c));
919 return;
920 } else if ((c & 0xffff0000) == c) {
921 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
922 return;
923 } else {
924 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
925 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
929 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
931 int mb, me;
933 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
934 if (mask64_operand(c, &mb, &me)) {
935 if (mb == 0) {
936 tcg_out_rld(s, RLDICR, dst, src, 0, me);
937 } else {
938 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
940 } else if ((c & 0xffff) == c) {
941 tcg_out32(s, ANDI | SAI(src, dst, c));
942 return;
943 } else if ((c & 0xffff0000) == c) {
944 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
945 return;
946 } else {
947 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
948 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
952 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
953 int op_lo, int op_hi)
955 if (c >> 16) {
956 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
957 src = dst;
959 if (c & 0xffff) {
960 tcg_out32(s, op_lo | SAI(src, dst, c));
961 src = dst;
965 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
967 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
970 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
972 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
975 static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
977 ptrdiff_t disp = tcg_pcrel_diff(s, target);
978 if (in_range_b(disp)) {
979 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
980 } else {
981 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
982 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
983 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
987 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
988 TCGReg base, tcg_target_long offset)
990 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
991 bool is_store = false;
992 TCGReg rs = TCG_REG_TMP1;
994 switch (opi) {
995 case LD: case LWA:
996 align = 3;
997 /* FALLTHRU */
998 default:
999 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1000 rs = rt;
1001 break;
1003 break;
1004 case STD:
1005 align = 3;
1006 /* FALLTHRU */
1007 case STB: case STH: case STW:
1008 is_store = true;
1009 break;
1012 /* For unaligned, or very large offsets, use the indexed form. */
1013 if (offset & align || offset != (int32_t)offset || opi == 0) {
1014 if (rs == base) {
1015 rs = TCG_REG_R0;
1017 tcg_debug_assert(!is_store || rs != rt);
1018 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1019 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1020 return;
1023 l0 = (int16_t)offset;
1024 offset = (offset - l0) >> 16;
1025 l1 = (int16_t)offset;
1027 if (l1 < 0 && orig >= 0) {
1028 extra = 0x4000;
1029 l1 = (int16_t)(offset - 0x4000);
1031 if (l1) {
1032 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1033 base = rs;
1035 if (extra) {
1036 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1037 base = rs;
1039 if (opi != ADDI || base != rt || l0 != 0) {
1040 tcg_out32(s, opi | TAI(rt & 31, base, l0));
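    /* Worked example of the split above (illustrative offset): 0x7fff8000
       gives l0 = -0x8000 and a raw high part of 0x8000, which would
       sign-extend negative, so extra = 0x4000 and two ADDIS steps of 0x4000
       are emitted: base + 0x40000000 + 0x40000000 - 0x8000 = base
       + 0x7fff8000. */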
1044 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1045 TCGReg va, TCGReg vb, int shb)
1047 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1050 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1051 TCGReg base, intptr_t offset)
1053 int shift;
1055 switch (type) {
1056 case TCG_TYPE_I32:
1057 if (ret < TCG_REG_V0) {
1058 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1059 break;
1061 tcg_debug_assert((offset & 3) == 0);
1062 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1063 shift = (offset - 4) & 0xc;
1064 if (shift) {
1065 tcg_out_vsldoi(s, ret, ret, ret, shift);
1067 break;
1068 case TCG_TYPE_I64:
1069 if (ret < TCG_REG_V0) {
1070 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1071 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1072 break;
1074 /* fallthru */
1075 case TCG_TYPE_V64:
1076 tcg_debug_assert(ret >= TCG_REG_V0);
1077 tcg_debug_assert((offset & 7) == 0);
1078 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1079 if (offset & 8) {
1080 tcg_out_vsldoi(s, ret, ret, ret, 8);
1082 break;
1083 case TCG_TYPE_V128:
1084 tcg_debug_assert(ret >= TCG_REG_V0);
1085 tcg_debug_assert((offset & 15) == 0);
1086 tcg_out_mem_long(s, 0, LVX, ret, base, offset);
1087 break;
1088 default:
1089 g_assert_not_reached();
1093 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1094 TCGReg base, intptr_t offset)
1096 int shift;
1098 switch (type) {
1099 case TCG_TYPE_I32:
1100 if (arg < TCG_REG_V0) {
1101 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1102 break;
1104 tcg_debug_assert((offset & 3) == 0);
1105 shift = (offset - 4) & 0xc;
1106 if (shift) {
1107 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1108 arg = TCG_VEC_TMP1;
1110 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1111 break;
1112 case TCG_TYPE_I64:
1113 if (arg < TCG_REG_V0) {
1114 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1115 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1116 break;
1118 /* fallthru */
1119 case TCG_TYPE_V64:
1120 tcg_debug_assert(arg >= TCG_REG_V0);
1121 tcg_debug_assert((offset & 7) == 0);
1122 if (offset & 8) {
1123 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1124 arg = TCG_VEC_TMP1;
1126 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1127 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1128 break;
1129 case TCG_TYPE_V128:
1130 tcg_debug_assert(arg >= TCG_REG_V0);
1131 tcg_out_mem_long(s, 0, STVX, arg, base, offset);
1132 break;
1133 default:
1134 g_assert_not_reached();
1138 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1139 TCGReg base, intptr_t ofs)
1141 return false;
1144 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1145 int const_arg2, int cr, TCGType type)
1147 int imm;
1148 uint32_t op;
1150 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1152 /* Simplify the comparisons below wrt CMPI. */
1153 if (type == TCG_TYPE_I32) {
1154 arg2 = (int32_t)arg2;
1157 switch (cond) {
1158 case TCG_COND_EQ:
1159 case TCG_COND_NE:
1160 if (const_arg2) {
1161 if ((int16_t) arg2 == arg2) {
1162 op = CMPI;
1163 imm = 1;
1164 break;
1165 } else if ((uint16_t) arg2 == arg2) {
1166 op = CMPLI;
1167 imm = 1;
1168 break;
1171 op = CMPL;
1172 imm = 0;
1173 break;
1175 case TCG_COND_LT:
1176 case TCG_COND_GE:
1177 case TCG_COND_LE:
1178 case TCG_COND_GT:
1179 if (const_arg2) {
1180 if ((int16_t) arg2 == arg2) {
1181 op = CMPI;
1182 imm = 1;
1183 break;
1186 op = CMP;
1187 imm = 0;
1188 break;
1190 case TCG_COND_LTU:
1191 case TCG_COND_GEU:
1192 case TCG_COND_LEU:
1193 case TCG_COND_GTU:
1194 if (const_arg2) {
1195 if ((uint16_t) arg2 == arg2) {
1196 op = CMPLI;
1197 imm = 1;
1198 break;
1201 op = CMPL;
1202 imm = 0;
1203 break;
1205 default:
1206 tcg_abort();
1208 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1210 if (imm) {
1211 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1212 } else {
1213 if (const_arg2) {
1214 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1215 arg2 = TCG_REG_R0;
1217 tcg_out32(s, op | RA(arg1) | RB(arg2));
1221 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1222 TCGReg dst, TCGReg src)
1224 if (type == TCG_TYPE_I32) {
1225 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1226 tcg_out_shri32(s, dst, dst, 5);
1227 } else {
1228 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1229 tcg_out_shri64(s, dst, dst, 6);
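    /* CNTLZW yields 32 only when the source is zero (0..31 otherwise), so
       the shift right by 5 (or by 6 for CNTLZD's 0..64 range) produces
       exactly the 0/1 value of "src == 0". */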
1233 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1235 /* X != 0 implies X + -1 generates a carry. Extra addition
1236 trickery means: R = ~(X-1) + X + CA = CA. */
1237 if (dst != src) {
1238 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1239 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1240 } else {
1241 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1242 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
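    /* For example, X = 5: ADDIC produces 4 with CA = 1 and SUBFE computes
       ~4 + 5 + 1 = 1.  X = 0: ADDIC produces -1 with CA = 0 and SUBFE
       computes ~(-1) + 0 + 0 = 0. */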
1246 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1247 bool const_arg2)
1249 if (const_arg2) {
1250 if ((uint32_t)arg2 == arg2) {
1251 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1252 } else {
1253 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1254 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1256 } else {
1257 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1259 return TCG_REG_R0;
1262 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1263 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1264 int const_arg2)
1266 int crop, sh;
1268 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1270 /* Ignore high bits of a potential constant arg2. */
1271 if (type == TCG_TYPE_I32) {
1272 arg2 = (uint32_t)arg2;
1275 /* Handle common and trivial cases before handling anything else. */
1276 if (arg2 == 0) {
1277 switch (cond) {
1278 case TCG_COND_EQ:
1279 tcg_out_setcond_eq0(s, type, arg0, arg1);
1280 return;
1281 case TCG_COND_NE:
1282 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1283 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1284 arg1 = TCG_REG_R0;
1286 tcg_out_setcond_ne0(s, arg0, arg1);
1287 return;
1288 case TCG_COND_GE:
1289 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1290 arg1 = arg0;
1291 /* FALLTHRU */
1292 case TCG_COND_LT:
1293 /* Extract the sign bit. */
1294 if (type == TCG_TYPE_I32) {
1295 tcg_out_shri32(s, arg0, arg1, 31);
1296 } else {
1297 tcg_out_shri64(s, arg0, arg1, 63);
1299 return;
1300 default:
1301 break;
1305 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1306 All other cases below are also at least 3 insns, so speed up the
1307 code generator by not considering them and always using ISEL. */
1308 if (have_isel) {
1309 int isel, tab;
1311 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1313 isel = tcg_to_isel[cond];
1315 tcg_out_movi(s, type, arg0, 1);
1316 if (isel & 1) {
1317 /* arg0 = (bc ? 0 : 1) */
1318 tab = TAB(arg0, 0, arg0);
1319 isel &= ~1;
1320 } else {
1321 /* arg0 = (bc ? 1 : 0) */
1322 tcg_out_movi(s, type, TCG_REG_R0, 0);
1323 tab = TAB(arg0, arg0, TCG_REG_R0);
1325 tcg_out32(s, isel | tab);
1326 return;
1329 switch (cond) {
1330 case TCG_COND_EQ:
1331 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1332 tcg_out_setcond_eq0(s, type, arg0, arg1);
1333 return;
1335 case TCG_COND_NE:
1336 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1337 /* Discard the high bits only once, rather than both inputs. */
1338 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1339 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1340 arg1 = TCG_REG_R0;
1342 tcg_out_setcond_ne0(s, arg0, arg1);
1343 return;
1345 case TCG_COND_GT:
1346 case TCG_COND_GTU:
1347 sh = 30;
1348 crop = 0;
1349 goto crtest;
1351 case TCG_COND_LT:
1352 case TCG_COND_LTU:
1353 sh = 29;
1354 crop = 0;
1355 goto crtest;
1357 case TCG_COND_GE:
1358 case TCG_COND_GEU:
1359 sh = 31;
1360 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1361 goto crtest;
1363 case TCG_COND_LE:
1364 case TCG_COND_LEU:
1365 sh = 31;
1366 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1367 crtest:
1368 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1369 if (crop) {
1370 tcg_out32(s, crop);
1372 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1373 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1374 break;
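        /* After MFOCRF, CR field 7 occupies the low nibble of R0 as
           LT:GT:EQ:SO (bit 3 down to bit 0), so the rotate amounts used
           above (29, 30, 31) bring LT, GT or EQ into bit 0 before the
           single-bit mask; for GE/LE the CRNOR first writes the negation
           of LT/GT into the EQ bit so that sh = 31 applies. */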
1376 default:
1377 tcg_abort();
1381 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1383 if (l->has_value) {
1384 bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
1385 } else {
1386 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1388 tcg_out32(s, bc);
1391 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1392 TCGArg arg1, TCGArg arg2, int const_arg2,
1393 TCGLabel *l, TCGType type)
1395 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1396 tcg_out_bc(s, tcg_to_bc[cond], l);
1399 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1400 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1401 TCGArg v2, bool const_c2)
1403 /* If for some reason both inputs are zero, don't produce bad code. */
1404 if (v1 == 0 && v2 == 0) {
1405 tcg_out_movi(s, type, dest, 0);
1406 return;
1409 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1411 if (have_isel) {
1412 int isel = tcg_to_isel[cond];
1414 /* Swap the V operands if the operation indicates inversion. */
1415 if (isel & 1) {
1416 int t = v1;
1417 v1 = v2;
1418 v2 = t;
1419 isel &= ~1;
1421 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1422 if (v2 == 0) {
1423 tcg_out_movi(s, type, TCG_REG_R0, 0);
1425 tcg_out32(s, isel | TAB(dest, v1, v2));
1426 } else {
1427 if (dest == v2) {
1428 cond = tcg_invert_cond(cond);
1429 v2 = v1;
1430 } else if (dest != v1) {
1431 if (v1 == 0) {
1432 tcg_out_movi(s, type, dest, 0);
1433 } else {
1434 tcg_out_mov(s, type, dest, v1);
1437 /* Branch forward over one insn */
1438 tcg_out32(s, tcg_to_bc[cond] | 8);
1439 if (v2 == 0) {
1440 tcg_out_movi(s, type, dest, 0);
1441 } else {
1442 tcg_out_mov(s, type, dest, v2);
1447 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1448 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1450 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1451 tcg_out32(s, opc | RA(a0) | RS(a1));
1452 } else {
1453 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1454 /* Note that the only other valid constant for a2 is 0. */
1455 if (have_isel) {
1456 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1457 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1458 } else if (!const_a2 && a0 == a2) {
1459 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1460 tcg_out32(s, opc | RA(a0) | RS(a1));
1461 } else {
1462 tcg_out32(s, opc | RA(a0) | RS(a1));
1463 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1464 if (const_a2) {
1465 tcg_out_movi(s, type, a0, 0);
1466 } else {
1467 tcg_out_mov(s, type, a0, a2);
1473 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1474 const int *const_args)
1476 static const struct { uint8_t bit1, bit2; } bits[] = {
1477 [TCG_COND_LT ] = { CR_LT, CR_LT },
1478 [TCG_COND_LE ] = { CR_LT, CR_GT },
1479 [TCG_COND_GT ] = { CR_GT, CR_GT },
1480 [TCG_COND_GE ] = { CR_GT, CR_LT },
1481 [TCG_COND_LTU] = { CR_LT, CR_LT },
1482 [TCG_COND_LEU] = { CR_LT, CR_GT },
1483 [TCG_COND_GTU] = { CR_GT, CR_GT },
1484 [TCG_COND_GEU] = { CR_GT, CR_LT },
1487 TCGCond cond = args[4], cond2;
1488 TCGArg al, ah, bl, bh;
1489 int blconst, bhconst;
1490 int op, bit1, bit2;
1492 al = args[0];
1493 ah = args[1];
1494 bl = args[2];
1495 bh = args[3];
1496 blconst = const_args[2];
1497 bhconst = const_args[3];
1499 switch (cond) {
1500 case TCG_COND_EQ:
1501 op = CRAND;
1502 goto do_equality;
1503 case TCG_COND_NE:
1504 op = CRNAND;
1505 do_equality:
1506 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1507 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1508 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1509 break;
1511 case TCG_COND_LT:
1512 case TCG_COND_LE:
1513 case TCG_COND_GT:
1514 case TCG_COND_GE:
1515 case TCG_COND_LTU:
1516 case TCG_COND_LEU:
1517 case TCG_COND_GTU:
1518 case TCG_COND_GEU:
1519 bit1 = bits[cond].bit1;
1520 bit2 = bits[cond].bit2;
1521 op = (bit1 != bit2 ? CRANDC : CRAND);
1522 cond2 = tcg_unsigned_cond(cond);
1524 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1525 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1526 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1527 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1528 break;
1530 default:
1531 tcg_abort();
1535 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1536 const int *const_args)
1538 tcg_out_cmp2(s, args + 1, const_args + 1);
1539 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1540 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1543 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1544 const int *const_args)
1546 tcg_out_cmp2(s, args, const_args);
1547 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1550 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1552 uint32_t insn = HWSYNC;
1553 a0 &= TCG_MO_ALL;
1554 if (a0 == TCG_MO_LD_LD) {
1555 insn = LWSYNC;
1556 } else if (a0 == TCG_MO_ST_ST) {
1557 insn = EIEIO;
1559 tcg_out32(s, insn);
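    /* lwsync orders every combination except store-then-load, and eieio
       orders stores to ordinary memory, so each suffices for the specific
       case it is used for above; any barrier that may include store->load
       ordering requires the full hwsync. */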
1562 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
1563 uintptr_t addr)
1565 if (TCG_TARGET_REG_BITS == 64) {
1566 tcg_insn_unit i1, i2;
1567 intptr_t tb_diff = addr - tc_ptr;
1568 intptr_t br_diff = addr - (jmp_addr + 4);
1569 uint64_t pair;
1571 /* This does not exercise the range of the branch, but we do
1572 still need to be able to load the new value of TCG_REG_TB.
1573 But this does still happen quite often. */
1574 if (tb_diff == (int16_t)tb_diff) {
1575 i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
1576 i2 = B | (br_diff & 0x3fffffc);
1577 } else {
1578 intptr_t lo = (int16_t)tb_diff;
1579 intptr_t hi = (int32_t)(tb_diff - lo);
1580 assert(tb_diff == hi + lo);
1581 i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
1582 i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
1584 #ifdef HOST_WORDS_BIGENDIAN
1585 pair = (uint64_t)i1 << 32 | i2;
1586 #else
1587 pair = (uint64_t)i2 << 32 | i1;
1588 #endif
1590 /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
1591 within atomic_set that would fail to build a ppc32 host. */
1592 atomic_set__nocheck((uint64_t *)jmp_addr, pair);
1593 flush_icache_range(jmp_addr, jmp_addr + 8);
1594 } else {
1595 intptr_t diff = addr - jmp_addr;
1596 tcg_debug_assert(in_range_b(diff));
1597 atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
1598 flush_icache_range(jmp_addr, jmp_addr + 4);
1602 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
1604 #ifdef _CALL_AIX
 1605 /* Look through the descriptor.  If the branch is in range and the TOC is
 1606    cheap to build, branch directly; else load entry and TOC from memory. */
1607 void *tgt = ((void **)target)[0];
1608 uintptr_t toc = ((uintptr_t *)target)[1];
1609 intptr_t diff = tcg_pcrel_diff(s, tgt);
1611 if (in_range_b(diff) && toc == (uint32_t)toc) {
1612 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1613 tcg_out_b(s, LK, tgt);
1614 } else {
1615 /* Fold the low bits of the constant into the addresses below. */
1616 intptr_t arg = (intptr_t)target;
1617 int ofs = (int16_t)arg;
1619 if (ofs + 8 < 0x8000) {
1620 arg -= ofs;
1621 } else {
1622 ofs = 0;
1624 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1625 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1626 tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
1627 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1628 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1630 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1631 intptr_t diff;
1633 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1634 address, which the callee uses to compute its TOC address. */
1635 /* FIXME: when the branch is in range, we could avoid r12 load if we
1636 knew that the destination uses the same TOC, and what its local
1637 entry point offset is. */
1638 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1640 diff = tcg_pcrel_diff(s, target);
1641 if (in_range_b(diff)) {
1642 tcg_out_b(s, LK, target);
1643 } else {
1644 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1645 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1647 #else
1648 tcg_out_b(s, LK, target);
1649 #endif
1652 static const uint32_t qemu_ldx_opc[16] = {
1653 [MO_UB] = LBZX,
1654 [MO_UW] = LHZX,
1655 [MO_UL] = LWZX,
1656 [MO_Q] = LDX,
1657 [MO_SW] = LHAX,
1658 [MO_SL] = LWAX,
1659 [MO_BSWAP | MO_UB] = LBZX,
1660 [MO_BSWAP | MO_UW] = LHBRX,
1661 [MO_BSWAP | MO_UL] = LWBRX,
1662 [MO_BSWAP | MO_Q] = LDBRX,
1665 static const uint32_t qemu_stx_opc[16] = {
1666 [MO_UB] = STBX,
1667 [MO_UW] = STHX,
1668 [MO_UL] = STWX,
1669 [MO_Q] = STDX,
1670 [MO_BSWAP | MO_UB] = STBX,
1671 [MO_BSWAP | MO_UW] = STHBRX,
1672 [MO_BSWAP | MO_UL] = STWBRX,
1673 [MO_BSWAP | MO_Q] = STDBRX,
1676 static const uint32_t qemu_exts_opc[4] = {
1677 EXTSB, EXTSH, EXTSW, 0
1680 #if defined (CONFIG_SOFTMMU)
1681 #include "tcg-ldst.inc.c"
1683 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1684 * int mmu_idx, uintptr_t ra)
1686 static void * const qemu_ld_helpers[16] = {
1687 [MO_UB] = helper_ret_ldub_mmu,
1688 [MO_LEUW] = helper_le_lduw_mmu,
1689 [MO_LEUL] = helper_le_ldul_mmu,
1690 [MO_LEQ] = helper_le_ldq_mmu,
1691 [MO_BEUW] = helper_be_lduw_mmu,
1692 [MO_BEUL] = helper_be_ldul_mmu,
1693 [MO_BEQ] = helper_be_ldq_mmu,
1696 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1697 * uintxx_t val, int mmu_idx, uintptr_t ra)
1699 static void * const qemu_st_helpers[16] = {
1700 [MO_UB] = helper_ret_stb_mmu,
1701 [MO_LEUW] = helper_le_stw_mmu,
1702 [MO_LEUL] = helper_le_stl_mmu,
1703 [MO_LEQ] = helper_le_stq_mmu,
1704 [MO_BEUW] = helper_be_stw_mmu,
1705 [MO_BEUL] = helper_be_stl_mmu,
1706 [MO_BEQ] = helper_be_stq_mmu,
1709 /* We expect to use a 16-bit negative offset from ENV. */
1710 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1711 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1713 /* Perform the TLB load and compare. Places the result of the comparison
1714 in CR7, loads the addend of the TLB into R3, and returns the register
1715 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1717 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1718 TCGReg addrlo, TCGReg addrhi,
1719 int mem_index, bool is_read)
1721 int cmp_off
1722 = (is_read
1723 ? offsetof(CPUTLBEntry, addr_read)
1724 : offsetof(CPUTLBEntry, addr_write));
1725 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1726 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1727 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1728 unsigned s_bits = opc & MO_SIZE;
1729 unsigned a_bits = get_alignment_bits(opc);
1731 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1732 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
1733 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
1735 /* Extract the page index, shifted into place for tlb index. */
1736 if (TCG_TARGET_REG_BITS == 32) {
1737 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
1738 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1739 } else {
1740 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
1741 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1743 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
1745 /* Load the TLB comparator. */
1746 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
1747 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
1748 ? LWZUX : LDUX);
1749 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
1750 } else {
1751 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
1752 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1753 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
1754 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
1755 } else {
1756 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
1760 /* Load the TLB addend for use on the fast path. Do this asap
1761 to minimize any load use delay. */
1762 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
1763 offsetof(CPUTLBEntry, addend));
1765 /* Clear the non-page, non-alignment bits from the address */
1766 if (TCG_TARGET_REG_BITS == 32) {
1767 /* We don't support unaligned accesses on 32-bits.
1768 * Preserve the bottom bits and thus trigger a comparison
1769 * failure on unaligned accesses.
1771 if (a_bits < s_bits) {
1772 a_bits = s_bits;
1774 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
1775 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1776 } else {
1777 TCGReg t = addrlo;
1779 /* If the access is unaligned, we need to make sure we fail if we
1780 * cross a page boundary. The trick is to add the access size-1
1781 * to the address before masking the low bits. That will make the
1782 * address overflow to the next page if we cross a page boundary,
1783 * which will then force a mismatch of the TLB compare.
1785 if (a_bits < s_bits) {
1786 unsigned a_mask = (1 << a_bits) - 1;
1787 unsigned s_mask = (1 << s_bits) - 1;
1788 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
1789 t = TCG_REG_R0;
1792 /* Mask the address for the requested alignment. */
1793 if (TARGET_LONG_BITS == 32) {
1794 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
1795 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1796 /* Zero-extend the address for use in the final address. */
1797 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
1798 addrlo = TCG_REG_R4;
1799 } else if (a_bits == 0) {
1800 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
1801 } else {
1802 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
1803 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
1804 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
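    /* The two rotates above clear only bits [a_bits, TARGET_PAGE_BITS) of
       the address: the page number and the low alignment bits survive, so
       an insufficiently aligned address keeps nonzero low bits and the
       compare against the page-aligned TLB comparator fails. */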
1808 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1809 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1810 0, 7, TCG_TYPE_I32);
1811 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
1812 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1813 } else {
1814 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1815 0, 7, TCG_TYPE_TL);
1818 return addrlo;
1821 /* Record the context of a call to the out of line helper code for the slow
1822 path for a load or store, so that we can later generate the correct
1823 helper code. */
1824 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1825 TCGReg datalo_reg, TCGReg datahi_reg,
1826 TCGReg addrlo_reg, TCGReg addrhi_reg,
1827 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
1829 TCGLabelQemuLdst *label = new_ldst_label(s);
1831 label->is_ld = is_ld;
1832 label->oi = oi;
1833 label->datalo_reg = datalo_reg;
1834 label->datahi_reg = datahi_reg;
1835 label->addrlo_reg = addrlo_reg;
1836 label->addrhi_reg = addrhi_reg;
1837 label->raddr = raddr;
1838 label->label_ptr[0] = lptr;
1841 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1843 TCGMemOpIdx oi = lb->oi;
1844 MemOp opc = get_memop(oi);
1845 TCGReg hi, lo, arg = TCG_REG_R3;
1847 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
1848 return false;
1851 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
1853 lo = lb->addrlo_reg;
1854 hi = lb->addrhi_reg;
1855 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1856 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1857 arg |= 1;
1858 #endif
1859 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
1860 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
1861 } else {
1862 /* If the address needed to be zero-extended, we'll have already
1863 placed it in R4. The only remaining case is 64-bit guest. */
1864 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
1867 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
1868 tcg_out32(s, MFSPR | RT(arg) | LR);
1870 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1872 lo = lb->datalo_reg;
1873 hi = lb->datahi_reg;
1874 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
1875 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
1876 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
1877 } else if (opc & MO_SIGN) {
1878 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
1879 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
1880 } else {
1881 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
1884 tcg_out_b(s, 0, lb->raddr);
1885 return true;
1888 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1890 TCGMemOpIdx oi = lb->oi;
1891 MemOp opc = get_memop(oi);
1892 MemOp s_bits = opc & MO_SIZE;
1893 TCGReg hi, lo, arg = TCG_REG_R3;
1895 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
1896 return false;
1899 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
1901 lo = lb->addrlo_reg;
1902 hi = lb->addrhi_reg;
1903 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1904 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1905 arg |= 1;
1906 #endif
1907 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
1908 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
1909 } else {
1910 /* If the address needed to be zero-extended, we'll have already
1911 placed it in R4. The only remaining case is 64-bit guest. */
1912 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
1915 lo = lb->datalo_reg;
1916 hi = lb->datahi_reg;
1917 if (TCG_TARGET_REG_BITS == 32) {
1918 switch (s_bits) {
1919 case MO_64:
1920 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1921 arg |= 1;
1922 #endif
1923 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
1924 /* FALLTHRU */
1925 case MO_32:
1926 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
1927 break;
1928 default:
1929 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
1930 break;
1932 } else {
1933 if (s_bits == MO_64) {
1934 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
1935 } else {
1936 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
1940 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
1941 tcg_out32(s, MFSPR | RT(arg) | LR);
1943 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1945 tcg_out_b(s, 0, lb->raddr);
1946 return true;
1948 #endif /* SOFTMMU */
1950 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1952 TCGReg datalo, datahi, addrlo, rbase;
1953 TCGReg addrhi __attribute__((unused));
1954 TCGMemOpIdx oi;
1955 MemOp opc, s_bits;
1956 #ifdef CONFIG_SOFTMMU
1957 int mem_index;
1958 tcg_insn_unit *label_ptr;
1959 #endif
1961 datalo = *args++;
1962 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
1963 addrlo = *args++;
1964 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
1965 oi = *args++;
1966 opc = get_memop(oi);
1967 s_bits = opc & MO_SIZE;
1969 #ifdef CONFIG_SOFTMMU
1970 mem_index = get_mmuidx(oi);
1971 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
1973 /* Load a pointer into the current opcode w/conditional branch-link. */
1974 label_ptr = s->code_ptr;
1975 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
1977 rbase = TCG_REG_R3;
1978 #else /* !CONFIG_SOFTMMU */
1979 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
1980 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1981 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
1982 addrlo = TCG_REG_TMP1;
1984 #endif
1986 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
1987 if (opc & MO_BSWAP) {
1988 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
1989 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
1990 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
1991 } else if (rbase != 0) {
1992 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
1993 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
1994 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
1995 } else if (addrlo == datahi) {
1996 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
1997 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
1998 } else {
1999 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2000 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2002 } else {
2003 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2004 if (!have_isa_2_06 && insn == LDBRX) {
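            /* ISA 2.06 added ldbrx; on older cores the byte-reversed 64-bit
               load is emulated with two lwbrx (low word at addrlo, high word
               at addrlo + 4) merged by RLDIMI into bits 63..32 of datalo. */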
2005 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2006 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2007 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2008 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2009 } else if (insn) {
2010 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2011 } else {
2012 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2013 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2014 insn = qemu_exts_opc[s_bits];
2015 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2019 #ifdef CONFIG_SOFTMMU
2020 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2021 s->code_ptr, label_ptr);
2022 #endif
2025 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2027 TCGReg datalo, datahi, addrlo, rbase;
2028 TCGReg addrhi __attribute__((unused));
2029 TCGMemOpIdx oi;
2030 MemOp opc, s_bits;
2031 #ifdef CONFIG_SOFTMMU
2032 int mem_index;
2033 tcg_insn_unit *label_ptr;
2034 #endif
2036 datalo = *args++;
2037 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2038 addrlo = *args++;
2039 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2040 oi = *args++;
2041 opc = get_memop(oi);
2042 s_bits = opc & MO_SIZE;
2044 #ifdef CONFIG_SOFTMMU
2045 mem_index = get_mmuidx(oi);
2046 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2048 /* Load a pointer into the current opcode w/conditional branch-link. */
2049 label_ptr = s->code_ptr;
2050 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2052 rbase = TCG_REG_R3;
2053 #else /* !CONFIG_SOFTMMU */
2054 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2055 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2056 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2057 addrlo = TCG_REG_TMP1;
2059 #endif
2061 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2062 if (opc & MO_BSWAP) {
2063 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2064 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2065 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2066 } else if (rbase != 0) {
2067 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2068 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2069 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2070 } else {
2071 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2072 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2074 } else {
2075 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2076 if (!have_isa_2_06 && insn == STDBRX) {
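/* Pre-2.06 has no STDBRX: store the low word byte-reversed at addr, then shift the high word down and store it byte-reversed at addr+4. */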
2077 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2078 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2079 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2080 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2081 } else {
2082 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2086 #ifdef CONFIG_SOFTMMU
2087 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2088 s->code_ptr, label_ptr);
2089 #endif
2092 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2094 int i;
2095 for (i = 0; i < count; ++i) {
2096 p[i] = NOP;
2100 /* Parameters for function call generation, used in tcg.c. */
2101 #define TCG_TARGET_STACK_ALIGN 16
2102 #define TCG_TARGET_EXTEND_ARGS 1
2104 #ifdef _CALL_AIX
2105 # define LINK_AREA_SIZE (6 * SZR)
2106 # define LR_OFFSET (1 * SZR)
2107 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2108 #elif defined(TCG_TARGET_CALL_DARWIN)
2109 # define LINK_AREA_SIZE (6 * SZR)
2110 # define LR_OFFSET (2 * SZR)
2111 #elif TCG_TARGET_REG_BITS == 64
2112 # if defined(_CALL_ELF) && _CALL_ELF == 2
2113 # define LINK_AREA_SIZE (4 * SZR)
2114 # define LR_OFFSET (1 * SZR)
2115 # endif
2116 #else /* TCG_TARGET_REG_BITS == 32 */
2117 # if defined(_CALL_SYSV)
2118 # define LINK_AREA_SIZE (2 * SZR)
2119 # define LR_OFFSET (1 * SZR)
2120 # endif
2121 #endif
2122 #ifndef LR_OFFSET
2123 # error "Unhandled ABI"
2124 #endif
2125 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2126 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2127 #endif
2129 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2130 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2132 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2133 + TCG_STATIC_CALL_ARGS_SIZE \
2134 + CPU_TEMP_BUF_SIZE \
2135 + REG_SAVE_SIZE \
2136 + TCG_TARGET_STACK_ALIGN - 1) \
2137 & -TCG_TARGET_STACK_ALIGN)
2139 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
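/*
 * Resulting frame layout (a sketch), from low to high offsets off the new SP:
 *   [0, TCG_TARGET_CALL_STACK_OFFSET)                 link area (incl. the AIX parameter save area)
 *   [..., ... + TCG_STATIC_CALL_ARGS_SIZE)            outgoing helper arguments
 *   [REG_SAVE_BOT - CPU_TEMP_BUF_SIZE, REG_SAVE_BOT)  TCG temp buffer (see tcg_set_frame below)
 *   [REG_SAVE_BOT, FRAME_SIZE)                        callee-saved GPRs
 * with any alignment padding falling below the temp buffer.
 */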
2141 static void tcg_target_qemu_prologue(TCGContext *s)
2143 int i;
2145 #ifdef _CALL_AIX
2146 void **desc = (void **)s->code_ptr;
2147 desc[0] = desc + 2; /* entry point */
2148 desc[1] = 0; /* environment pointer */
2149 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2150 #endif
2152 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2153 CPU_TEMP_BUF_SIZE);
2155 /* Prologue */
2156 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2157 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2158 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2160 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2161 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2162 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2164 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2166 #ifndef CONFIG_SOFTMMU
2167 if (guest_base) {
2168 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2169 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2171 #endif
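/* Enter the translated code: env (incoming argument 0) moves to TCG_AREG0 and the TB address (argument 1) to CTR (and to TCG_REG_TB when in use) before branching. */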
2173 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2174 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2175 if (USE_REG_TB) {
2176 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2178 tcg_out32(s, BCCTR | BO_ALWAYS);
2180 /* Epilogue */
2181 s->code_gen_epilogue = tb_ret_addr = s->code_ptr;
2183 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2184 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2185 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2186 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2188 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2189 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2190 tcg_out32(s, BCLR | BO_ALWAYS);
2193 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
2194 const int *const_args)
2196 TCGArg a0, a1, a2;
2197 int c;
2199 switch (opc) {
2200 case INDEX_op_exit_tb:
2201 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
2202 tcg_out_b(s, 0, tb_ret_addr);
2203 break;
2204 case INDEX_op_goto_tb:
2205 if (s->tb_jmp_insn_offset) {
2206 /* Direct jump. */
2207 if (TCG_TARGET_REG_BITS == 64) {
2208 /* Ensure the next insns are 8-byte aligned. */
2209 if ((uintptr_t)s->code_ptr & 7) {
2210 tcg_out32(s, NOP);
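/* Direct jump: a patchable ADDIS/ADDI pair adjusts TCG_REG_TB to the target TB. The 8-byte alignment ensured above lets the pair be rewritten with a single atomic 64-bit store when the jump is (re)linked. */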
2212 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2213 tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2214 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2215 } else {
2216 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2217 tcg_out32(s, B);
2218 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
2219 break;
2221 } else {
2222 /* Indirect jump. */
2223 tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
2224 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
2225 (intptr_t)(s->tb_jmp_target_addr + args[0]));
2227 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2228 tcg_out32(s, BCCTR | BO_ALWAYS);
2229 set_jmp_reset_offset(s, args[0]);
2230 if (USE_REG_TB) {
2231 /* For the unlinked case, need to reset TCG_REG_TB. */
2232 c = -tcg_current_code_size(s);
2233 assert(c == (int16_t)c);
2234 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
2236 break;
2237 case INDEX_op_goto_ptr:
2238 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2239 if (USE_REG_TB) {
2240 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2242 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2243 tcg_out32(s, BCCTR | BO_ALWAYS);
2244 break;
2245 case INDEX_op_br:
2247 TCGLabel *l = arg_label(args[0]);
2248 uint32_t insn = B;
2250 if (l->has_value) {
2251 insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
2252 } else {
2253 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2255 tcg_out32(s, insn);
2257 break;
2258 case INDEX_op_ld8u_i32:
2259 case INDEX_op_ld8u_i64:
2260 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2261 break;
2262 case INDEX_op_ld8s_i32:
2263 case INDEX_op_ld8s_i64:
2264 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2265 tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
2266 break;
2267 case INDEX_op_ld16u_i32:
2268 case INDEX_op_ld16u_i64:
2269 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2270 break;
2271 case INDEX_op_ld16s_i32:
2272 case INDEX_op_ld16s_i64:
2273 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2274 break;
2275 case INDEX_op_ld_i32:
2276 case INDEX_op_ld32u_i64:
2277 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2278 break;
2279 case INDEX_op_ld32s_i64:
2280 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2281 break;
2282 case INDEX_op_ld_i64:
2283 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2284 break;
2285 case INDEX_op_st8_i32:
2286 case INDEX_op_st8_i64:
2287 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2288 break;
2289 case INDEX_op_st16_i32:
2290 case INDEX_op_st16_i64:
2291 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2292 break;
2293 case INDEX_op_st_i32:
2294 case INDEX_op_st32_i64:
2295 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2296 break;
2297 case INDEX_op_st_i64:
2298 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2299 break;
2301 case INDEX_op_add_i32:
2302 a0 = args[0], a1 = args[1], a2 = args[2];
2303 if (const_args[2]) {
2304 do_addi_32:
2305 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2306 } else {
2307 tcg_out32(s, ADD | TAB(a0, a1, a2));
2309 break;
2310 case INDEX_op_sub_i32:
2311 a0 = args[0], a1 = args[1], a2 = args[2];
2312 if (const_args[1]) {
2313 if (const_args[2]) {
2314 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2315 } else {
2316 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2318 } else if (const_args[2]) {
2319 a2 = -a2;
2320 goto do_addi_32;
2321 } else {
2322 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2324 break;
2326 case INDEX_op_and_i32:
2327 a0 = args[0], a1 = args[1], a2 = args[2];
2328 if (const_args[2]) {
2329 tcg_out_andi32(s, a0, a1, a2);
2330 } else {
2331 tcg_out32(s, AND | SAB(a1, a0, a2));
2333 break;
2334 case INDEX_op_and_i64:
2335 a0 = args[0], a1 = args[1], a2 = args[2];
2336 if (const_args[2]) {
2337 tcg_out_andi64(s, a0, a1, a2);
2338 } else {
2339 tcg_out32(s, AND | SAB(a1, a0, a2));
2341 break;
2342 case INDEX_op_or_i64:
2343 case INDEX_op_or_i32:
2344 a0 = args[0], a1 = args[1], a2 = args[2];
2345 if (const_args[2]) {
2346 tcg_out_ori32(s, a0, a1, a2);
2347 } else {
2348 tcg_out32(s, OR | SAB(a1, a0, a2));
2350 break;
2351 case INDEX_op_xor_i64:
2352 case INDEX_op_xor_i32:
2353 a0 = args[0], a1 = args[1], a2 = args[2];
2354 if (const_args[2]) {
2355 tcg_out_xori32(s, a0, a1, a2);
2356 } else {
2357 tcg_out32(s, XOR | SAB(a1, a0, a2));
2359 break;
2360 case INDEX_op_andc_i32:
2361 a0 = args[0], a1 = args[1], a2 = args[2];
2362 if (const_args[2]) {
2363 tcg_out_andi32(s, a0, a1, ~a2);
2364 } else {
2365 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2367 break;
2368 case INDEX_op_andc_i64:
2369 a0 = args[0], a1 = args[1], a2 = args[2];
2370 if (const_args[2]) {
2371 tcg_out_andi64(s, a0, a1, ~a2);
2372 } else {
2373 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2375 break;
2376 case INDEX_op_orc_i32:
2377 if (const_args[2]) {
2378 tcg_out_ori32(s, args[0], args[1], ~args[2]);
2379 break;
2381 /* FALLTHRU */
2382 case INDEX_op_orc_i64:
2383 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2384 break;
2385 case INDEX_op_eqv_i32:
2386 if (const_args[2]) {
2387 tcg_out_xori32(s, args[0], args[1], ~args[2]);
2388 break;
2390 /* FALLTHRU */
2391 case INDEX_op_eqv_i64:
2392 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2393 break;
2394 case INDEX_op_nand_i32:
2395 case INDEX_op_nand_i64:
2396 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2397 break;
2398 case INDEX_op_nor_i32:
2399 case INDEX_op_nor_i64:
2400 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2401 break;
2403 case INDEX_op_clz_i32:
2404 tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2405 args[2], const_args[2]);
2406 break;
2407 case INDEX_op_ctz_i32:
2408 tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2409 args[2], const_args[2]);
2410 break;
2411 case INDEX_op_ctpop_i32:
2412 tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2413 break;
2415 case INDEX_op_clz_i64:
2416 tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2417 args[2], const_args[2]);
2418 break;
2419 case INDEX_op_ctz_i64:
2420 tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2421 args[2], const_args[2]);
2422 break;
2423 case INDEX_op_ctpop_i64:
2424 tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2425 break;
2427 case INDEX_op_mul_i32:
2428 a0 = args[0], a1 = args[1], a2 = args[2];
2429 if (const_args[2]) {
2430 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2431 } else {
2432 tcg_out32(s, MULLW | TAB(a0, a1, a2));
2434 break;
2436 case INDEX_op_div_i32:
2437 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2438 break;
2440 case INDEX_op_divu_i32:
2441 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2442 break;
2444 case INDEX_op_shl_i32:
2445 if (const_args[2]) {
2446 tcg_out_shli32(s, args[0], args[1], args[2]);
2447 } else {
2448 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2450 break;
2451 case INDEX_op_shr_i32:
2452 if (const_args[2]) {
2453 tcg_out_shri32(s, args[0], args[1], args[2]);
2454 } else {
2455 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2457 break;
2458 case INDEX_op_sar_i32:
2459 if (const_args[2]) {
2460 tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
2461 } else {
2462 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2464 break;
2465 case INDEX_op_rotl_i32:
2466 if (const_args[2]) {
2467 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2468 } else {
2469 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2470 | MB(0) | ME(31));
2472 break;
2473 case INDEX_op_rotr_i32:
2474 if (const_args[2]) {
2475 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2476 } else {
2477 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2478 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2479 | MB(0) | ME(31));
2481 break;
2483 case INDEX_op_brcond_i32:
2484 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2485 arg_label(args[3]), TCG_TYPE_I32);
2486 break;
2487 case INDEX_op_brcond_i64:
2488 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2489 arg_label(args[3]), TCG_TYPE_I64);
2490 break;
2491 case INDEX_op_brcond2_i32:
2492 tcg_out_brcond2(s, args, const_args);
2493 break;
2495 case INDEX_op_neg_i32:
2496 case INDEX_op_neg_i64:
2497 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2498 break;
2500 case INDEX_op_not_i32:
2501 case INDEX_op_not_i64:
2502 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2503 break;
2505 case INDEX_op_add_i64:
2506 a0 = args[0], a1 = args[1], a2 = args[2];
2507 if (const_args[2]) {
2508 do_addi_64:
2509 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2510 } else {
2511 tcg_out32(s, ADD | TAB(a0, a1, a2));
2513 break;
2514 case INDEX_op_sub_i64:
2515 a0 = args[0], a1 = args[1], a2 = args[2];
2516 if (const_args[1]) {
2517 if (const_args[2]) {
2518 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2519 } else {
2520 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2522 } else if (const_args[2]) {
2523 a2 = -a2;
2524 goto do_addi_64;
2525 } else {
2526 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2528 break;
2530 case INDEX_op_shl_i64:
2531 if (const_args[2]) {
2532 tcg_out_shli64(s, args[0], args[1], args[2]);
2533 } else {
2534 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2536 break;
2537 case INDEX_op_shr_i64:
2538 if (const_args[2]) {
2539 tcg_out_shri64(s, args[0], args[1], args[2]);
2540 } else {
2541 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2543 break;
2544 case INDEX_op_sar_i64:
2545 if (const_args[2]) {
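/* SRADI takes a 6-bit shift count: bits 0-4 go in the SH field, while bit 5 is encoded separately (built into bit 1 here). */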
2546 int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
2547 tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
2548 } else {
2549 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2551 break;
2552 case INDEX_op_rotl_i64:
2553 if (const_args[2]) {
2554 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2555 } else {
2556 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2558 break;
2559 case INDEX_op_rotr_i64:
2560 if (const_args[2]) {
2561 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2562 } else {
2563 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2564 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2566 break;
2568 case INDEX_op_mul_i64:
2569 a0 = args[0], a1 = args[1], a2 = args[2];
2570 if (const_args[2]) {
2571 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2572 } else {
2573 tcg_out32(s, MULLD | TAB(a0, a1, a2));
2575 break;
2576 case INDEX_op_div_i64:
2577 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2578 break;
2579 case INDEX_op_divu_i64:
2580 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2581 break;
2583 case INDEX_op_qemu_ld_i32:
2584 tcg_out_qemu_ld(s, args, false);
2585 break;
2586 case INDEX_op_qemu_ld_i64:
2587 tcg_out_qemu_ld(s, args, true);
2588 break;
2589 case INDEX_op_qemu_st_i32:
2590 tcg_out_qemu_st(s, args, false);
2591 break;
2592 case INDEX_op_qemu_st_i64:
2593 tcg_out_qemu_st(s, args, true);
2594 break;
2596 case INDEX_op_ext8s_i32:
2597 case INDEX_op_ext8s_i64:
2598 c = EXTSB;
2599 goto gen_ext;
2600 case INDEX_op_ext16s_i32:
2601 case INDEX_op_ext16s_i64:
2602 c = EXTSH;
2603 goto gen_ext;
2604 case INDEX_op_ext_i32_i64:
2605 case INDEX_op_ext32s_i64:
2606 c = EXTSW;
2607 goto gen_ext;
2608 gen_ext:
2609 tcg_out32(s, c | RS(args[1]) | RA(args[0]));
2610 break;
2611 case INDEX_op_extu_i32_i64:
2612 tcg_out_ext32u(s, args[0], args[1]);
2613 break;
2615 case INDEX_op_setcond_i32:
2616 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2617 const_args[2]);
2618 break;
2619 case INDEX_op_setcond_i64:
2620 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2621 const_args[2]);
2622 break;
2623 case INDEX_op_setcond2_i32:
2624 tcg_out_setcond2(s, args, const_args);
2625 break;
2627 case INDEX_op_bswap16_i32:
2628 case INDEX_op_bswap16_i64:
2629 a0 = args[0], a1 = args[1];
2630 /* a1 = abcd */
2631 if (a0 != a1) {
2632 /* a0 = (a1 r<< 24) & 0xff # 000c */
2633 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
2634 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
2635 tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
2636 } else {
2637 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
2638 tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
2639 /* a0 = (a1 r<< 24) & 0xff # 000c */
2640 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
2641 /* a0 = a0 | r0 # 00dc */
2642 tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
2644 break;
2646 case INDEX_op_bswap32_i32:
2647 case INDEX_op_bswap32_i64:
2648 /* Stolen from gcc's builtin_bswap32 */
2649 a1 = args[1];
2650 a0 = args[0] == a1 ? TCG_REG_R0 : args[0];
2652 /* a1 = args[1] # abcd */
2653 /* a0 = rotate_left (a1, 8) # bcda */
2654 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
2655 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
2656 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
2657 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
2658 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
2660 if (a0 == TCG_REG_R0) {
2661 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2663 break;
2665 case INDEX_op_bswap64_i64:
2666 a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
2667 if (a0 == a1) {
2668 a0 = TCG_REG_R0;
2669 a2 = a1;
2672 /* a1 = # abcd efgh */
2673 /* a0 = rl32(a1, 8) # 0000 fghe */
2674 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
2675 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
2676 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
2677 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
2678 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
2680 /* a0 = rl64(a0, 32) # hgfe 0000 */
2681 /* a2 = rl64(a1, 32) # efgh abcd */
2682 tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
2683 tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
2685 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
2686 tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
2687 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
2688 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
2689 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
2690 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
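/* a0 is TCG_REG_R0 (i.e. 0) only when the result was built in the scratch register; copy it back to the real destination. */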
2692 if (a0 == 0) {
2693 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2695 break;
2697 case INDEX_op_deposit_i32:
2698 if (const_args[2]) {
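/* With the "rZ" constraint the only constant value here is 0, so depositing it reduces to clearing the field. */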
2699 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
2700 tcg_out_andi32(s, args[0], args[0], ~mask);
2701 } else {
2702 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
2703 32 - args[3] - args[4], 31 - args[3]);
2705 break;
2706 case INDEX_op_deposit_i64:
2707 if (const_args[2]) {
2708 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
2709 tcg_out_andi64(s, args[0], args[0], ~mask);
2710 } else {
2711 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
2712 64 - args[3] - args[4]);
2714 break;
2716 case INDEX_op_extract_i32:
2717 tcg_out_rlw(s, RLWINM, args[0], args[1],
2718 32 - args[2], 32 - args[3], 31);
2719 break;
2720 case INDEX_op_extract_i64:
2721 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
2722 break;
2724 case INDEX_op_movcond_i32:
2725 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
2726 args[3], args[4], const_args[2]);
2727 break;
2728 case INDEX_op_movcond_i64:
2729 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
2730 args[3], args[4], const_args[2]);
2731 break;
2733 #if TCG_TARGET_REG_BITS == 64
2734 case INDEX_op_add2_i64:
2735 #else
2736 case INDEX_op_add2_i32:
2737 #endif
2738 /* Note that the CA bit is defined based on the word size of the
2739 environment. So in 64-bit mode it's always carry-out of bit 63.
2740 The fallback code using deposit works just as well for 32-bit. */
2741 a0 = args[0], a1 = args[1];
2742 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
2743 a0 = TCG_REG_R0;
2745 if (const_args[4]) {
2746 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
2747 } else {
2748 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
2750 if (const_args[5]) {
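/* A constant high part is 0 or -1 ("rZM" constraint): ADDZE adds only the carry, ADDME adds the carry minus one. */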
2751 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
2752 } else {
2753 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
2755 if (a0 != args[0]) {
2756 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2758 break;
2760 #if TCG_TARGET_REG_BITS == 64
2761 case INDEX_op_sub2_i64:
2762 #else
2763 case INDEX_op_sub2_i32:
2764 #endif
2765 a0 = args[0], a1 = args[1];
2766 if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
2767 a0 = TCG_REG_R0;
2769 if (const_args[2]) {
2770 tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
2771 } else {
2772 tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
2774 if (const_args[3]) {
2775 tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
2776 } else {
2777 tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
2779 if (a0 != args[0]) {
2780 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2782 break;
2784 case INDEX_op_muluh_i32:
2785 tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
2786 break;
2787 case INDEX_op_mulsh_i32:
2788 tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
2789 break;
2790 case INDEX_op_muluh_i64:
2791 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
2792 break;
2793 case INDEX_op_mulsh_i64:
2794 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
2795 break;
2797 case INDEX_op_mb:
2798 tcg_out_mb(s, args[0]);
2799 break;
2801 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2802 case INDEX_op_mov_i64:
2803 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2804 case INDEX_op_movi_i64:
2805 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2806 default:
2807 tcg_abort();
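/* Return 1 if the vector op is supported directly, 0 if not at all, and -1 if it can only be emitted via expansion in tcg_expand_vec_op (the comparisons below). */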
2811 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2813 switch (opc) {
2814 case INDEX_op_and_vec:
2815 case INDEX_op_or_vec:
2816 case INDEX_op_xor_vec:
2817 case INDEX_op_andc_vec:
2818 case INDEX_op_not_vec:
2819 return 1;
2820 case INDEX_op_cmp_vec:
2821 return vece <= MO_32 ? -1 : 0;
2822 default:
2823 return 0;
2827 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2828 TCGReg dst, TCGReg src)
2830 tcg_debug_assert(dst >= TCG_REG_V0);
2831 tcg_debug_assert(src >= TCG_REG_V0);
2834 * Recall we use (or emulate) VSX integer loads, so the integer is
2835 * right justified within the left (zero-index) double-word.
2837 switch (vece) {
2838 case MO_8:
2839 tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
2840 break;
2841 case MO_16:
2842 tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
2843 break;
2844 case MO_32:
2845 tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
2846 break;
2847 case MO_64:
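/* VMX has no doubleword splat; rotate with two VSLDOIs so the (right-justified) doubleword lands in both halves. */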
2848 tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
2849 tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
2850 break;
2851 default:
2852 g_assert_not_reached();
2854 return true;
2857 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2858 TCGReg out, TCGReg base, intptr_t offset)
2860 int elt;
2862 tcg_debug_assert(out >= TCG_REG_V0);
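/* lve*x loads the element into the vector lane matching the address offset within its 16-byte block (big-endian lane numbering, hence the XOR on little-endian hosts); splatting that lane then broadcasts it. */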
2863 switch (vece) {
2864 case MO_8:
2865 tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
2866 elt = extract32(offset, 0, 4);
2867 #ifndef HOST_WORDS_BIGENDIAN
2868 elt ^= 15;
2869 #endif
2870 tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
2871 break;
2872 case MO_16:
2873 tcg_debug_assert((offset & 1) == 0);
2874 tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
2875 elt = extract32(offset, 1, 3);
2876 #ifndef HOST_WORDS_BIGENDIAN
2877 elt ^= 7;
2878 #endif
2879 tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
2880 break;
2881 case MO_32:
2882 tcg_debug_assert((offset & 3) == 0);
2883 tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
2884 elt = extract32(offset, 2, 2);
2885 #ifndef HOST_WORDS_BIGENDIAN
2886 elt ^= 3;
2887 #endif
2888 tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
2889 break;
2890 case MO_64:
2891 tcg_debug_assert((offset & 7) == 0);
2892 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
2893 tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
2894 elt = extract32(offset, 3, 1);
2895 #ifndef HOST_WORDS_BIGENDIAN
2896 elt = !elt;
2897 #endif
2898 if (elt) {
2899 tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
2900 } else {
2901 tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
2903 break;
2904 default:
2905 g_assert_not_reached();
2907 return true;
2910 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2911 unsigned vecl, unsigned vece,
2912 const TCGArg *args, const int *const_args)
2914 static const uint32_t
2915 eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 },
2916 gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 },
2917 gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 };
2919 TCGType type = vecl + TCG_TYPE_V64;
2920 TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
2921 uint32_t insn;
2923 switch (opc) {
2924 case INDEX_op_ld_vec:
2925 tcg_out_ld(s, type, a0, a1, a2);
2926 return;
2927 case INDEX_op_st_vec:
2928 tcg_out_st(s, type, a0, a1, a2);
2929 return;
2930 case INDEX_op_dupm_vec:
2931 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2932 return;
2934 case INDEX_op_and_vec:
2935 insn = VAND;
2936 break;
2937 case INDEX_op_or_vec:
2938 insn = VOR;
2939 break;
2940 case INDEX_op_xor_vec:
2941 insn = VXOR;
2942 break;
2943 case INDEX_op_andc_vec:
2944 insn = VANDC;
2945 break;
2946 case INDEX_op_not_vec:
2947 insn = VNOR;
2948 a2 = a1;
2949 break;
2951 case INDEX_op_cmp_vec:
2952 switch (args[3]) {
2953 case TCG_COND_EQ:
2954 insn = eq_op[vece];
2955 break;
2956 case TCG_COND_GT:
2957 insn = gts_op[vece];
2958 break;
2959 case TCG_COND_GTU:
2960 insn = gtu_op[vece];
2961 break;
2962 default:
2963 g_assert_not_reached();
2965 break;
2967 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
2968 case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
2969 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
2970 default:
2971 g_assert_not_reached();
2974 tcg_debug_assert(insn != 0);
2975 tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
2978 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
2979 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2981 bool need_swap = false, need_inv = false;
2983 tcg_debug_assert(vece <= MO_32);
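/* Altivec provides only EQ, GT and GTU compares; derive the other conditions by swapping the operands and/or inverting the result. */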
2985 switch (cond) {
2986 case TCG_COND_EQ:
2987 case TCG_COND_GT:
2988 case TCG_COND_GTU:
2989 break;
2990 case TCG_COND_NE:
2991 case TCG_COND_LE:
2992 case TCG_COND_LEU:
2993 need_inv = true;
2994 break;
2995 case TCG_COND_LT:
2996 case TCG_COND_LTU:
2997 need_swap = true;
2998 break;
2999 case TCG_COND_GE:
3000 case TCG_COND_GEU:
3001 need_swap = need_inv = true;
3002 break;
3003 default:
3004 g_assert_not_reached();
3007 if (need_inv) {
3008 cond = tcg_invert_cond(cond);
3010 if (need_swap) {
3011 TCGv_vec t1;
3012 t1 = v1, v1 = v2, v2 = t1;
3013 cond = tcg_swap_cond(cond);
3016 vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3017 tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3019 if (need_inv) {
3020 tcg_gen_not_vec(vece, v0, v0);
3024 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3025 TCGArg a0, ...)
3027 va_list va;
3028 TCGv_vec v0, v1, v2;
3030 va_start(va, a0);
3031 v0 = temp_tcgv_vec(arg_temp(a0));
3032 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3033 v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3035 switch (opc) {
3036 case INDEX_op_cmp_vec:
3037 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3038 break;
3039 default:
3040 g_assert_not_reached();
3042 va_end(va);
3045 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
3047 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
3048 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
3049 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
3050 static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
3051 static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
3052 static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
3053 static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
3054 static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
3055 static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
3056 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
3057 static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
3058 static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
3059 static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
3060 static const TCGTargetOpDef r_rI_ri
3061 = { .args_ct_str = { "r", "rI", "ri" } };
3062 static const TCGTargetOpDef r_rI_rT
3063 = { .args_ct_str = { "r", "rI", "rT" } };
3064 static const TCGTargetOpDef r_r_rZW
3065 = { .args_ct_str = { "r", "r", "rZW" } };
3066 static const TCGTargetOpDef L_L_L_L
3067 = { .args_ct_str = { "L", "L", "L", "L" } };
3068 static const TCGTargetOpDef S_S_S_S
3069 = { .args_ct_str = { "S", "S", "S", "S" } };
3070 static const TCGTargetOpDef movc
3071 = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
3072 static const TCGTargetOpDef dep
3073 = { .args_ct_str = { "r", "0", "rZ" } };
3074 static const TCGTargetOpDef br2
3075 = { .args_ct_str = { "r", "r", "ri", "ri" } };
3076 static const TCGTargetOpDef setc2
3077 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
3078 static const TCGTargetOpDef add2
3079 = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
3080 static const TCGTargetOpDef sub2
3081 = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
3082 static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
3083 static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
3084 static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
3086 switch (op) {
3087 case INDEX_op_goto_ptr:
3088 return &r;
3090 case INDEX_op_ld8u_i32:
3091 case INDEX_op_ld8s_i32:
3092 case INDEX_op_ld16u_i32:
3093 case INDEX_op_ld16s_i32:
3094 case INDEX_op_ld_i32:
3095 case INDEX_op_st8_i32:
3096 case INDEX_op_st16_i32:
3097 case INDEX_op_st_i32:
3098 case INDEX_op_ctpop_i32:
3099 case INDEX_op_neg_i32:
3100 case INDEX_op_not_i32:
3101 case INDEX_op_ext8s_i32:
3102 case INDEX_op_ext16s_i32:
3103 case INDEX_op_bswap16_i32:
3104 case INDEX_op_bswap32_i32:
3105 case INDEX_op_extract_i32:
3106 case INDEX_op_ld8u_i64:
3107 case INDEX_op_ld8s_i64:
3108 case INDEX_op_ld16u_i64:
3109 case INDEX_op_ld16s_i64:
3110 case INDEX_op_ld32u_i64:
3111 case INDEX_op_ld32s_i64:
3112 case INDEX_op_ld_i64:
3113 case INDEX_op_st8_i64:
3114 case INDEX_op_st16_i64:
3115 case INDEX_op_st32_i64:
3116 case INDEX_op_st_i64:
3117 case INDEX_op_ctpop_i64:
3118 case INDEX_op_neg_i64:
3119 case INDEX_op_not_i64:
3120 case INDEX_op_ext8s_i64:
3121 case INDEX_op_ext16s_i64:
3122 case INDEX_op_ext32s_i64:
3123 case INDEX_op_ext_i32_i64:
3124 case INDEX_op_extu_i32_i64:
3125 case INDEX_op_bswap16_i64:
3126 case INDEX_op_bswap32_i64:
3127 case INDEX_op_bswap64_i64:
3128 case INDEX_op_extract_i64:
3129 return &r_r;
3131 case INDEX_op_add_i32:
3132 case INDEX_op_and_i32:
3133 case INDEX_op_or_i32:
3134 case INDEX_op_xor_i32:
3135 case INDEX_op_andc_i32:
3136 case INDEX_op_orc_i32:
3137 case INDEX_op_eqv_i32:
3138 case INDEX_op_shl_i32:
3139 case INDEX_op_shr_i32:
3140 case INDEX_op_sar_i32:
3141 case INDEX_op_rotl_i32:
3142 case INDEX_op_rotr_i32:
3143 case INDEX_op_setcond_i32:
3144 case INDEX_op_and_i64:
3145 case INDEX_op_andc_i64:
3146 case INDEX_op_shl_i64:
3147 case INDEX_op_shr_i64:
3148 case INDEX_op_sar_i64:
3149 case INDEX_op_rotl_i64:
3150 case INDEX_op_rotr_i64:
3151 case INDEX_op_setcond_i64:
3152 return &r_r_ri;
3153 case INDEX_op_mul_i32:
3154 case INDEX_op_mul_i64:
3155 return &r_r_rI;
3156 case INDEX_op_div_i32:
3157 case INDEX_op_divu_i32:
3158 case INDEX_op_nand_i32:
3159 case INDEX_op_nor_i32:
3160 case INDEX_op_muluh_i32:
3161 case INDEX_op_mulsh_i32:
3162 case INDEX_op_orc_i64:
3163 case INDEX_op_eqv_i64:
3164 case INDEX_op_nand_i64:
3165 case INDEX_op_nor_i64:
3166 case INDEX_op_div_i64:
3167 case INDEX_op_divu_i64:
3168 case INDEX_op_mulsh_i64:
3169 case INDEX_op_muluh_i64:
3170 return &r_r_r;
3171 case INDEX_op_sub_i32:
3172 return &r_rI_ri;
3173 case INDEX_op_add_i64:
3174 return &r_r_rT;
3175 case INDEX_op_or_i64:
3176 case INDEX_op_xor_i64:
3177 return &r_r_rU;
3178 case INDEX_op_sub_i64:
3179 return &r_rI_rT;
3180 case INDEX_op_clz_i32:
3181 case INDEX_op_ctz_i32:
3182 case INDEX_op_clz_i64:
3183 case INDEX_op_ctz_i64:
3184 return &r_r_rZW;
3186 case INDEX_op_brcond_i32:
3187 case INDEX_op_brcond_i64:
3188 return &r_ri;
3190 case INDEX_op_movcond_i32:
3191 case INDEX_op_movcond_i64:
3192 return &movc;
3193 case INDEX_op_deposit_i32:
3194 case INDEX_op_deposit_i64:
3195 return &dep;
3196 case INDEX_op_brcond2_i32:
3197 return &br2;
3198 case INDEX_op_setcond2_i32:
3199 return &setc2;
3200 case INDEX_op_add2_i64:
3201 case INDEX_op_add2_i32:
3202 return &add2;
3203 case INDEX_op_sub2_i64:
3204 case INDEX_op_sub2_i32:
3205 return &sub2;
3207 case INDEX_op_qemu_ld_i32:
3208 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3209 ? &r_L : &r_L_L);
3210 case INDEX_op_qemu_st_i32:
3211 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3212 ? &S_S : &S_S_S);
3213 case INDEX_op_qemu_ld_i64:
3214 return (TCG_TARGET_REG_BITS == 64 ? &r_L
3215 : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
3216 case INDEX_op_qemu_st_i64:
3217 return (TCG_TARGET_REG_BITS == 64 ? &S_S
3218 : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
3220 case INDEX_op_and_vec:
3221 case INDEX_op_or_vec:
3222 case INDEX_op_xor_vec:
3223 case INDEX_op_andc_vec:
3224 case INDEX_op_orc_vec:
3225 case INDEX_op_cmp_vec:
3226 return &v_v_v;
3227 case INDEX_op_not_vec:
3228 case INDEX_op_dup_vec:
3229 return &v_v;
3230 case INDEX_op_ld_vec:
3231 case INDEX_op_st_vec:
3232 case INDEX_op_dupm_vec:
3233 return &v_r;
3235 default:
3236 return NULL;
3240 static void tcg_target_init(TCGContext *s)
3242 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3243 unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3245 have_isa = tcg_isa_base;
3246 if (hwcap & PPC_FEATURE_ARCH_2_06) {
3247 have_isa = tcg_isa_2_06;
3249 #ifdef PPC_FEATURE2_ARCH_3_00
3250 if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
3251 have_isa = tcg_isa_3_00;
3253 #endif
3255 #ifdef PPC_FEATURE2_HAS_ISEL
3256 /* Prefer explicit instruction from the kernel. */
3257 have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
3258 #else
3259 /* Fall back to knowing Power7 (2.06) has ISEL. */
3260 have_isel = have_isa_2_06;
3261 #endif
3263 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3264 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3265 if (have_altivec) {
3266 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3267 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3270 tcg_target_call_clobber_regs = 0;
3271 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3272 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3273 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3274 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3275 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3276 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3277 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
3278 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3279 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3280 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3281 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3282 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
3284 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3285 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3286 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3287 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3288 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3289 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3290 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3291 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3292 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
3293 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
3294 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
3295 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
3296 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
3297 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
3298 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
3299 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
3300 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3301 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3302 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3303 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3305 s->reserved_regs = 0;
3306 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
3307 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
3308 #if defined(_CALL_SYSV)
3309 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
3310 #endif
3311 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
3312 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
3313 #endif
3314 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
3315 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
3316 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
3317 if (USE_REG_TB) {
3318 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
3322 #ifdef __ELF__
3323 typedef struct {
3324 DebugFrameCIE cie;
3325 DebugFrameFDEHeader fde;
3326 uint8_t fde_def_cfa[4];
3327 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
3328 } DebugFrame;
3330 /* We're expecting a 2 byte uleb128 encoded value. */
3331 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3333 #if TCG_TARGET_REG_BITS == 64
3334 # define ELF_HOST_MACHINE EM_PPC64
3335 #else
3336 # define ELF_HOST_MACHINE EM_PPC
3337 #endif
3339 static DebugFrame debug_frame = {
3340 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3341 .cie.id = -1,
3342 .cie.version = 1,
3343 .cie.code_align = 1,
3344 .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
3345 .cie.return_column = 65,
3347 /* Total FDE size does not include the "len" member. */
3348 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
3350 .fde_def_cfa = {
3351 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */
3352 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3353 (FRAME_SIZE >> 7)
3355 .fde_reg_ofs = {
3356 /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
3357 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
3361 void tcg_register_jit(void *buf, size_t buf_size)
3363 uint8_t *p = &debug_frame.fde_reg_ofs[3];
3364 int i;
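/* Each callee-saved register gets a DW_CFA_offset entry: 0x80 | regno, followed by its CFA-relative slot expressed in data-alignment (-SZR) units. */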
3366 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
3367 p[0] = 0x80 + tcg_target_callee_save_regs[i];
3368 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
3371 debug_frame.fde.func_start = (uintptr_t)buf;
3372 debug_frame.fde.func_len = buf_size;
3374 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3376 #endif /* __ELF__ */
3378 void flush_icache_range(uintptr_t start, uintptr_t stop)
3380 uintptr_t p, start1, stop1;
3381 size_t dsize = qemu_dcache_linesize;
3382 size_t isize = qemu_icache_linesize;
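/* Usual PowerPC sequence: push the data cache lines to memory (dcbst), order with sync, invalidate the corresponding icache lines (icbi), then sync + isync before executing the new code. */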
3384 start1 = start & ~(dsize - 1);
3385 stop1 = (stop + dsize - 1) & ~(dsize - 1);
3386 for (p = start1; p < stop1; p += dsize) {
3387 asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
3389 asm volatile ("sync" : : : "memory");
3391 start1 = start & ~(isize - 1);
3392 stop1 = (stop + isize - 1) & ~(isize - 1);
3393 for (p = start1; p < stop1; p += isize) {
3394 asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
3396 asm volatile ("sync" : : : "memory");
3397 asm volatile ("isync" : : : "memory");