2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "../tcg-pool.c.inc"
28 #if defined _CALL_DARWIN || defined __APPLE__
29 #define TCG_TARGET_CALL_DARWIN
30 #endif
31 #ifdef _CALL_SYSV
32 # define TCG_TARGET_CALL_ALIGN_ARGS 1
33 #endif
35 /* For some memory operations, we need a scratch that isn't R0. For the AIX
36 calling convention, we can re-use the TOC register since we'll be reloading
37 it at every call. Otherwise R12 will do nicely as neither a call-saved
38 register nor a parameter register. */
39 #if defined(_CALL_AIX)
40 # define TCG_REG_TMP1 TCG_REG_R2
41 #else
42 # define TCG_REG_TMP1 TCG_REG_R12
43 #endif
45 #define TCG_VEC_TMP1 TCG_REG_V0
46 #define TCG_VEC_TMP2 TCG_REG_V1
48 #define TCG_REG_TB TCG_REG_R31
49 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
51 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
52 #define SZP ((int)sizeof(void *))
54 /* Shorthand for size of a register. */
55 #define SZR (TCG_TARGET_REG_BITS / 8)
57 #define TCG_CT_CONST_S16 0x100
58 #define TCG_CT_CONST_U16 0x200
59 #define TCG_CT_CONST_S32 0x400
60 #define TCG_CT_CONST_U32 0x800
61 #define TCG_CT_CONST_ZERO 0x1000
62 #define TCG_CT_CONST_MONE 0x2000
63 #define TCG_CT_CONST_WSZ 0x4000
65 #define ALL_GENERAL_REGS 0xffffffffu
66 #define ALL_VECTOR_REGS 0xffffffff00000000ull
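/*
 * Constraint masks are 64-bit register sets indexed by TCGReg:
 * r0..r31 occupy bits 0..31 and v0..v31 occupy bits 32..63, which is
 * why e.g. (1 << TCG_REG_R3) below removes just r3 from a set.
 */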
68 #ifdef CONFIG_SOFTMMU
69 #define ALL_QLOAD_REGS \
70 (ALL_GENERAL_REGS & \
71 ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
72 #define ALL_QSTORE_REGS \
73 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
74 (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
75 #else
76 #define ALL_QLOAD_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
77 #define ALL_QSTORE_REGS ALL_QLOAD_REGS
78 #endif
81 static bool have_isel;
85 #ifndef CONFIG_SOFTMMU
86 #define TCG_GUEST_BASE_REG 30
87 #endif
89 #ifdef CONFIG_DEBUG_TCG
90 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
91 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
92 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
93 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
94 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
95 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
96 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
97 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
98 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
102 static const int tcg_target_reg_alloc_order[] = {
103 TCG_REG_R14, /* call saved registers */
121 TCG_REG_R12, /* call clobbered, non-arguments */
125 TCG_REG_R10, /* call clobbered, arguments */
134 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
135 TCG_REG_V2, /* call clobbered, vectors */
155 static const int tcg_target_call_iarg_regs[] = {
166 static const int tcg_target_call_oarg_regs[] = {
171 static const int tcg_target_callee_save_regs[] = {
172 #ifdef TCG_TARGET_CALL_DARWIN
188 TCG_REG_R27, /* currently used for the global env */
195 static inline bool in_range_b(tcg_target_long target)
197 return target == sextract64(target, 0, 26);
200 static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
201 const tcg_insn_unit *target)
203 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
204 tcg_debug_assert(in_range_b(disp));
205 return disp & 0x3fffffc;
208 static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
210 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
211 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
213 if (in_range_b(disp)) {
214 *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
220 static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
221 const tcg_insn_unit *target)
223 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
224 tcg_debug_assert(disp == (int16_t) disp);
225 return disp & 0xfffc;
228 static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
230 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
231 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
233 if (disp == (int16_t) disp) {
234 *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
240 /* Test whether a constant matches the constraint. */
241 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
243 if (ct & TCG_CT_CONST) {
247 /* The only 32-bit constraint we use aside from
248 TCG_CT_CONST is TCG_CT_CONST_S16. */
249 if (type == TCG_TYPE_I32) {
253 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
255 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
257 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
259 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
261 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
263 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
265 } else if ((ct & TCG_CT_CONST_WSZ)
266 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
272 #define OPCD(opc) ((opc)<<26)
273 #define XO19(opc) (OPCD(19)|((opc)<<1))
274 #define MD30(opc) (OPCD(30)|((opc)<<2))
275 #define MDS30(opc) (OPCD(30)|((opc)<<1))
276 #define XO31(opc) (OPCD(31)|((opc)<<1))
277 #define XO58(opc) (OPCD(58)|(opc))
278 #define XO62(opc) (OPCD(62)|(opc))
279 #define VX4(opc) (OPCD(4)|(opc))
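/* Worked example of the packing above: ADD is primary opcode 31 with
   extended opcode 266, so XO31(266) = (31 << 26) | (266 << 1)
   = 0x7c000214, the base encoding of "add rt,ra,rb" before the
   register fields are or'ed in. */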
281 #define B OPCD( 18)
282 #define BC OPCD( 16)
283 #define LBZ OPCD( 34)
284 #define LHZ OPCD( 40)
285 #define LHA OPCD( 42)
286 #define LWZ OPCD( 32)
287 #define LWZUX XO31( 55)
288 #define STB OPCD( 38)
289 #define STH OPCD( 44)
290 #define STW OPCD( 36)
292 #define STD XO62( 0)
293 #define STDU XO62( 1)
294 #define STDX XO31(149)
296 #define LD XO58( 0)
297 #define LDX XO31( 21)
299 #define LDUX XO31( 53)
301 #define LWAX XO31(341)
303 #define ADDIC OPCD( 12)
304 #define ADDI OPCD( 14)
305 #define ADDIS OPCD( 15)
306 #define ORI OPCD( 24)
307 #define ORIS OPCD( 25)
308 #define XORI OPCD( 26)
309 #define XORIS OPCD( 27)
310 #define ANDI OPCD( 28)
311 #define ANDIS OPCD( 29)
312 #define MULLI OPCD( 7)
313 #define CMPLI OPCD( 10)
314 #define CMPI OPCD( 11)
315 #define SUBFIC OPCD( 8)
317 #define LWZU OPCD( 33)
318 #define STWU OPCD( 37)
320 #define RLWIMI OPCD( 20)
321 #define RLWINM OPCD( 21)
322 #define RLWNM OPCD( 23)
324 #define RLDICL MD30( 0)
325 #define RLDICR MD30( 1)
326 #define RLDIMI MD30( 3)
327 #define RLDCL MDS30( 8)
329 #define BCLR XO19( 16)
330 #define BCCTR XO19(528)
331 #define CRAND XO19(257)
332 #define CRANDC XO19(129)
333 #define CRNAND XO19(225)
334 #define CROR XO19(449)
335 #define CRNOR XO19( 33)
337 #define EXTSB XO31(954)
338 #define EXTSH XO31(922)
339 #define EXTSW XO31(986)
340 #define ADD XO31(266)
341 #define ADDE XO31(138)
342 #define ADDME XO31(234)
343 #define ADDZE XO31(202)
344 #define ADDC XO31( 10)
345 #define AND XO31( 28)
346 #define SUBF XO31( 40)
347 #define SUBFC XO31( 8)
348 #define SUBFE XO31(136)
349 #define SUBFME XO31(232)
350 #define SUBFZE XO31(200)
352 #define XOR XO31(316)
353 #define MULLW XO31(235)
354 #define MULHW XO31( 75)
355 #define MULHWU XO31( 11)
356 #define DIVW XO31(491)
357 #define DIVWU XO31(459)
359 #define CMPL XO31( 32)
360 #define LHBRX XO31(790)
361 #define LWBRX XO31(534)
362 #define LDBRX XO31(532)
363 #define STHBRX XO31(918)
364 #define STWBRX XO31(662)
365 #define STDBRX XO31(660)
366 #define MFSPR XO31(339)
367 #define MTSPR XO31(467)
368 #define SRAWI XO31(824)
369 #define NEG XO31(104)
370 #define MFCR XO31( 19)
371 #define MFOCRF (MFCR | (1u << 20))
372 #define NOR XO31(124)
373 #define CNTLZW XO31( 26)
374 #define CNTLZD XO31( 58)
375 #define CNTTZW XO31(538)
376 #define CNTTZD XO31(570)
377 #define CNTPOPW XO31(378)
378 #define CNTPOPD XO31(506)
379 #define ANDC XO31( 60)
380 #define ORC XO31(412)
381 #define EQV XO31(284)
382 #define NAND XO31(476)
383 #define ISEL XO31( 15)
385 #define MULLD XO31(233)
386 #define MULHD XO31( 73)
387 #define MULHDU XO31( 9)
388 #define DIVD XO31(489)
389 #define DIVDU XO31(457)
391 #define LBZX XO31( 87)
392 #define LHZX XO31(279)
393 #define LHAX XO31(343)
394 #define LWZX XO31( 23)
395 #define STBX XO31(215)
396 #define STHX XO31(407)
397 #define STWX XO31(151)
399 #define EIEIO XO31(854)
400 #define HWSYNC XO31(598)
401 #define LWSYNC (HWSYNC | (1u << 21))
403 #define SPR(a, b) ((((a)<<5)|(b))<<11)
404 #define LR SPR(8, 0)
405 #define CTR SPR(9, 0)
407 #define SLW XO31( 24)
408 #define SRW XO31(536)
409 #define SRAW XO31(792)
411 #define SLD XO31( 27)
412 #define SRD XO31(539)
413 #define SRAD XO31(794)
414 #define SRADI XO31(413<<1)
416 #define BRH XO31(219)
417 #define BRW XO31(155)
418 #define BRD XO31(187)
420 #define TW XO31( 4)
421 #define TRAP (TW | TO(31))
423 #define NOP ORI /* ori 0,0,0 */
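/* With all fields zero this is OPCD(24) = 0x60000000, the canonical
   PowerPC nop encoding. */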
425 #define LVX XO31(103)
426 #define LVEBX XO31(7)
427 #define LVEHX XO31(39)
428 #define LVEWX XO31(71)
429 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
430 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
431 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
432 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
433 #define LXSD (OPCD(57) | 2) /* v3.00 */
434 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
436 #define STVX XO31(231)
437 #define STVEWX XO31(199)
438 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
439 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
440 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
441 #define STXSD (OPCD(61) | 2) /* v3.00 */
443 #define VADDSBS VX4(768)
444 #define VADDUBS VX4(512)
445 #define VADDUBM VX4(0)
446 #define VADDSHS VX4(832)
447 #define VADDUHS VX4(576)
448 #define VADDUHM VX4(64)
449 #define VADDSWS VX4(896)
450 #define VADDUWS VX4(640)
451 #define VADDUWM VX4(128)
452 #define VADDUDM VX4(192) /* v2.07 */
454 #define VSUBSBS VX4(1792)
455 #define VSUBUBS VX4(1536)
456 #define VSUBUBM VX4(1024)
457 #define VSUBSHS VX4(1856)
458 #define VSUBUHS VX4(1600)
459 #define VSUBUHM VX4(1088)
460 #define VSUBSWS VX4(1920)
461 #define VSUBUWS VX4(1664)
462 #define VSUBUWM VX4(1152)
463 #define VSUBUDM VX4(1216) /* v2.07 */
465 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
466 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
468 #define VMAXSB VX4(258)
469 #define VMAXSH VX4(322)
470 #define VMAXSW VX4(386)
471 #define VMAXSD VX4(450) /* v2.07 */
472 #define VMAXUB VX4(2)
473 #define VMAXUH VX4(66)
474 #define VMAXUW VX4(130)
475 #define VMAXUD VX4(194) /* v2.07 */
476 #define VMINSB VX4(770)
477 #define VMINSH VX4(834)
478 #define VMINSW VX4(898)
479 #define VMINSD VX4(962) /* v2.07 */
480 #define VMINUB VX4(514)
481 #define VMINUH VX4(578)
482 #define VMINUW VX4(642)
483 #define VMINUD VX4(706) /* v2.07 */
485 #define VCMPEQUB VX4(6)
486 #define VCMPEQUH VX4(70)
487 #define VCMPEQUW VX4(134)
488 #define VCMPEQUD VX4(199) /* v2.07 */
489 #define VCMPGTSB VX4(774)
490 #define VCMPGTSH VX4(838)
491 #define VCMPGTSW VX4(902)
492 #define VCMPGTSD VX4(967) /* v2.07 */
493 #define VCMPGTUB VX4(518)
494 #define VCMPGTUH VX4(582)
495 #define VCMPGTUW VX4(646)
496 #define VCMPGTUD VX4(711) /* v2.07 */
497 #define VCMPNEB VX4(7) /* v3.00 */
498 #define VCMPNEH VX4(71) /* v3.00 */
499 #define VCMPNEW VX4(135) /* v3.00 */
501 #define VSLB VX4(260)
502 #define VSLH VX4(324)
503 #define VSLW VX4(388)
504 #define VSLD VX4(1476) /* v2.07 */
505 #define VSRB VX4(516)
506 #define VSRH VX4(580)
507 #define VSRW VX4(644)
508 #define VSRD VX4(1732) /* v2.07 */
509 #define VSRAB VX4(772)
510 #define VSRAH VX4(836)
511 #define VSRAW VX4(900)
512 #define VSRAD VX4(964) /* v2.07 */
515 #define VRLW VX4(132)
516 #define VRLD VX4(196) /* v2.07 */
518 #define VMULEUB VX4(520)
519 #define VMULEUH VX4(584)
520 #define VMULEUW VX4(648) /* v2.07 */
521 #define VMULOUB VX4(8)
522 #define VMULOUH VX4(72)
523 #define VMULOUW VX4(136) /* v2.07 */
524 #define VMULUWM VX4(137) /* v2.07 */
525 #define VMULLD VX4(457) /* v3.10 */
526 #define VMSUMUHM VX4(38)
528 #define VMRGHB VX4(12)
529 #define VMRGHH VX4(76)
530 #define VMRGHW VX4(140)
531 #define VMRGLB VX4(268)
532 #define VMRGLH VX4(332)
533 #define VMRGLW VX4(396)
535 #define VPKUHUM VX4(14)
536 #define VPKUWUM VX4(78)
538 #define VAND VX4(1028)
539 #define VANDC VX4(1092)
540 #define VNOR VX4(1284)
541 #define VOR VX4(1156)
542 #define VXOR VX4(1220)
543 #define VEQV VX4(1668) /* v2.07 */
544 #define VNAND VX4(1412) /* v2.07 */
545 #define VORC VX4(1348) /* v2.07 */
547 #define VSPLTB VX4(524)
548 #define VSPLTH VX4(588)
549 #define VSPLTW VX4(652)
550 #define VSPLTISB VX4(780)
551 #define VSPLTISH VX4(844)
552 #define VSPLTISW VX4(908)
554 #define VSLDOI VX4(44)
556 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
557 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
558 #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
560 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
561 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
562 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
563 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
564 #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
565 #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
567 #define RT(r) ((r)<<21)
568 #define RS(r) ((r)<<21)
569 #define RA(r) ((r)<<16)
570 #define RB(r) ((r)<<11)
571 #define TO(t) ((t)<<21)
572 #define SH(s) ((s)<<11)
573 #define MB(b) ((b)<<6)
574 #define ME(e) ((e)<<1)
575 #define BO(o) ((o)<<21)
576 #define MB64(b) ((b)<<5)
577 #define FXM(b) (1 << (19 - (b)))
579 #define VRT(r) (((r) & 31) << 21)
580 #define VRA(r) (((r) & 31) << 16)
581 #define VRB(r) (((r) & 31) << 11)
582 #define VRC(r) (((r) & 31) << 6)
586 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
587 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
588 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
589 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
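/* For illustration, ADDI | TAI(TCG_REG_R3, TCG_REG_R4, -8)
   = 0x38000000 | (3 << 21) | (4 << 16) | 0xfff8 = 0x3864fff8,
   i.e. "addi r3,r4,-8". */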
591 #define BF(n) ((n)<<23)
592 #define BI(n, c) (((c)+((n)*4))<<16)
593 #define BT(n, c) (((c)+((n)*4))<<21)
594 #define BA(n, c) (((c)+((n)*4))<<16)
595 #define BB(n, c) (((c)+((n)*4))<<11)
596 #define BC_(n, c) (((c)+((n)*4))<<6)
598 #define BO_COND_TRUE BO(12)
599 #define BO_COND_FALSE BO( 4)
600 #define BO_ALWAYS BO(20)
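/* Standard Power ISA BO encodings: 12 branches when the CR bit
   selected by BI is set, 4 when it is clear, and 20 branches
   unconditionally. */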
602 enum {
603 CR_LT,
604 CR_GT,
605 CR_EQ,
606 CR_SO
607 };
609 static const uint32_t tcg_to_bc[] = {
610 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
611 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
612 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
613 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
614 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
615 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
616 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
617 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
618 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
619 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
622 /* The low bit here is set if the RA and RB fields must be inverted. */
623 static const uint32_t tcg_to_isel[] = {
624 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
625 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
626 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
627 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
628 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
629 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
630 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
631 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
632 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
633 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
636 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
637 intptr_t value, intptr_t addend)
639 const tcg_insn_unit *target;
644 target = (const tcg_insn_unit *)value;
648 return reloc_pc14(code_ptr, target);
650 return reloc_pc24(code_ptr, target);
653 * We are (slightly) abusing this relocation type. In particular,
654 * assert that the low 2 bits are zero, and do not modify them.
655 * That way we can use this with LD et al that have opcode bits
656 * in the low 2 bits of the insn.
658 if ((value & 3) || value != (int16_t)value) {
661 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
665 * We are abusing this relocation type. Again, this points to
666 * a pair of insns, lis + load. This is an absolute address
667 * relocation for PPC32 so the lis cannot be removed.
671 if (hi + lo != value) {
674 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
675 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
678 g_assert_not_reached();
683 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
684 TCGReg base, tcg_target_long offset);
686 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
693 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
696 if (ret < TCG_REG_V0) {
697 if (arg < TCG_REG_V0) {
698 tcg_out32(s, OR | SAB(arg, ret, arg));
700 } else if (have_isa_2_07) {
701 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
702 | VRT(arg) | RA(ret));
705 /* Altivec does not support vector->integer moves. */
708 } else if (arg < TCG_REG_V0) {
710 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
711 | VRT(ret) | RA(arg));
714 /* Altivec does not support integer->vector moves. */
721 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
722 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
725 g_assert_not_reached();
730 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
733 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
734 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
735 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
736 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
739 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
740 int sh, int mb, int me)
742 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
745 static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
747 tcg_out32(s, EXTSB | RA(dst) | RS(src));
750 static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
752 tcg_out32(s, EXTSH | RA(dst) | RS(src));
755 static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
757 tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
760 static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
762 tcg_out32(s, EXTSW | RA(dst) | RS(src));
765 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
767 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
770 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
772 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
775 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
777 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
780 static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
782 /* Limit immediate shift count lest we create an illegal insn. */
783 tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
786 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
788 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
791 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
793 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
796 static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
798 tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
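/* SRADI keeps sh[0:4] in the normal SH field and sh[5] in insn bit 1;
   e.g. c = 34 yields SH(2) | 2, a shift of 32 + 2. */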
801 static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
803 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
806 tcg_out32(s, BRH | RA(dst) | RS(src));
807 if (flags & TCG_BSWAP_OS) {
808 tcg_out_ext16s(s, dst, dst);
809 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
810 tcg_out_ext16u(s, dst, dst);
817 * dep(a, b, m) -> (a & ~m) | (b & m)
819 * Begin with: src = xxxxabcd
821 /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
822 tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
823 /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
824 tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
826 if (flags & TCG_BSWAP_OS) {
827 tcg_out_ext16s(s, dst, tmp);
829 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
833 static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
835 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
838 tcg_out32(s, BRW | RA(dst) | RS(src));
839 if (flags & TCG_BSWAP_OS) {
840 tcg_out_ext32s(s, dst, dst);
841 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
842 tcg_out_ext32u(s, dst, dst);
848 * Stolen from gcc's builtin_bswap32.
850 * dep(a, b, m) -> (a & ~m) | (b & m)
852 * Begin with: src = xxxxabcd
854 /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
855 tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
856 /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
857 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
858 /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
859 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
861 if (flags & TCG_BSWAP_OS) {
862 tcg_out_ext32s(s, dst, tmp);
864 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
868 static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
870 TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
871 TCGReg t1 = dst == src ? dst : TCG_REG_R0;
874 tcg_out32(s, BRD | RA(dst) | RS(src));
880 * dep(a, b, m) -> (a & ~m) | (b & m)
882 * Begin with: src = abcdefgh
884 /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
885 tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
886 /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
887 tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
888 /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
889 tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
891 /* t0 = rol64(t0, 32) = hgfe0000 */
892 tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
893 /* t1 = rol64(src, 32) = efghabcd */
894 tcg_out_rld(s, RLDICL, t1, src, 32, 0);
896 /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
897 tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
898 /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
899 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
900 /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
901 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
903 tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
906 /* Emit a move into ret of arg, if it can be done in one insn. */
907 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
909 if (arg == (int16_t)arg) {
910 tcg_out32(s, ADDI | TAI(ret, 0, arg));
913 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
914 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
920 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
921 tcg_target_long arg, bool in_prologue)
927 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
929 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
933 /* Load 16-bit immediates with one insn. */
934 if (tcg_out_movi_one(s, ret, arg)) {
938 /* Load addresses within the TB with one insn. */
939 tb_diff = tcg_tbrel_diff(s, (void *)arg);
940 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
941 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
945 /* Load 32-bit immediates with two insns. Note that we've already
946 eliminated bare ADDIS, so we know both insns are required. */
947 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
948 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
949 tcg_out32(s, ORI | SAI(ret, ret, arg));
952 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
953 tcg_out32(s, ADDI | TAI(ret, 0, arg));
954 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
958 /* Load masked 16-bit value. */
959 if (arg > 0 && (arg & 0x8000)) {
960 tmp = arg | 0x7fff;
961 if ((tmp & (tmp + 1)) == 0) {
962 int mb = clz64(tmp + 1) + 1;
963 tcg_out32(s, ADDI | TAI(ret, 0, arg));
964 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
969 /* Load common masks with 2 insns. */
972 if (tmp == (int16_t)tmp) {
973 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
974 tcg_out_shli64(s, ret, ret, shift);
978 if (tcg_out_movi_one(s, ret, arg << shift)) {
979 tcg_out_shri64(s, ret, ret, shift);
983 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
984 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
985 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
989 /* Use the constant pool, if possible. */
990 if (!in_prologue && USE_REG_TB) {
991 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
992 tcg_tbrel_diff(s, NULL));
993 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
997 tmp = arg >> 31 >> 1;
998 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1000 tcg_out_shli64(s, ret, ret, 32);
1002 if (arg & 0xffff0000) {
1003 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1006 tcg_out32(s, ORI | SAI(ret, ret, arg));
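/* When none of the shortcuts above apply and the constant pool is not
   used (e.g. in the prologue), a full 64-bit constant such as
   0x123456789abc is built as "li ret,0x1234; sldi ret,ret,32;
   oris ret,ret,0x5678; ori ret,ret,0x9abc". */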
1010 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1011 TCGReg ret, int64_t val)
1020 if (low >= -16 && low < 16) {
1021 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1024 if (have_isa_3_00) {
1025 tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1032 if (low >= -16 && low < 16) {
1033 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1040 if (low >= -16 && low < 16) {
1041 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1048 * Otherwise we must load the value from the constant pool.
1052 add = tcg_tbrel_diff(s, NULL);
1059 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1060 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1061 if (TCG_TARGET_REG_BITS == 64) {
1062 new_pool_label(s, val, rel, s->code_ptr, add);
1064 new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1067 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1068 if (TCG_TARGET_REG_BITS == 64) {
1069 new_pool_l2(s, rel, s->code_ptr, add, val, val);
1071 new_pool_l4(s, rel, s->code_ptr, add,
1072 val >> 32, val, val >> 32, val);
1076 if (USE_REG_TB) {
1077 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1078 load_insn |= RA(TCG_REG_TB);
1079 } else {
1080 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1081 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1082 }
1083 tcg_out32(s, load_insn);
1086 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1087 tcg_target_long arg)
1092 tcg_debug_assert(ret < TCG_REG_V0);
1093 tcg_out_movi_int(s, type, ret, arg, false);
1097 g_assert_not_reached();
1101 static bool mask_operand(uint32_t c, int *mb, int *me)
1105 /* Accept a bit pattern like:
1109 Keep track of the transitions. */
1110 if (c == 0 || c == -1) {
1116 if (test & (test - 1)) {
1121 *mb = test ? clz32(test & -test) + 1 : 0;
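/* E.g. c = 0x0ff0: lsb = 0x10 and test = 0x1000, a power of two, so
   the ones are contiguous; the result is mb = 20, me = 27, the
   big-endian bit numbers of the rlwinm mask covering 0x0ff0. */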
1125 static bool mask64_operand(uint64_t c, int *mb, int *me)
1134 /* Accept 1..10..0. */
1140 /* Accept 0..01..1. */
1141 if (lsb == 1 && (c & (c + 1)) == 0) {
1142 *mb = clz64(c + 1) + 1;
1149 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1153 if (mask_operand(c, &mb, &me)) {
1154 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1155 } else if ((c & 0xffff) == c) {
1156 tcg_out32(s, ANDI | SAI(src, dst, c));
1158 } else if ((c & 0xffff0000) == c) {
1159 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1162 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1163 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1167 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1171 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1172 if (mask64_operand(c, &mb, &me)) {
1174 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1176 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1178 } else if ((c & 0xffff) == c) {
1179 tcg_out32(s, ANDI | SAI(src, dst, c));
1181 } else if ((c & 0xffff0000) == c) {
1182 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1185 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1186 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1190 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1191 int op_lo, int op_hi)
1194 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1198 tcg_out32(s, op_lo | SAI(src, dst, c));
1203 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1205 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1208 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1210 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1213 static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1215 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1216 if (in_range_b(disp)) {
1217 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1219 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1220 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1221 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
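/* A direct "b" covers +/-32MB (26-bit signed displacement); anything
   further away must go indirectly through CTR as above. */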
1225 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1226 TCGReg base, tcg_target_long offset)
1228 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1229 bool is_int_store = false;
1230 TCGReg rs = TCG_REG_TMP1;
1237 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1253 case STB: case STH: case STW:
1254 is_int_store = true;
1258 /* For unaligned, or very large offsets, use the indexed form. */
1259 if (offset & align || offset != (int32_t)offset || opi == 0) {
1263 tcg_debug_assert(!is_int_store || rs != rt);
1264 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1265 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1269 l0 = (int16_t)offset;
1270 offset = (offset - l0) >> 16;
1271 l1 = (int16_t)offset;
1273 if (l1 < 0 && orig >= 0) {
1275 l1 = (int16_t)(offset - 0x4000);
1278 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1282 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1285 if (opi != ADDI || base != rt || l0 != 0) {
1286 tcg_out32(s, opi | TAI(rt & 31, base, l0));
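/* E.g. offset = 0x12348 splits into l1 = 1, l0 = 0x2348, emitting
   "addis tmp,base,1" followed by the D-form opcode with an l0
   displacement. */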
1290 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1291 TCGReg va, TCGReg vb, int shb)
1293 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1296 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1297 TCGReg base, intptr_t offset)
1303 if (ret < TCG_REG_V0) {
1304 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1307 if (have_isa_2_07 && have_vsx) {
1308 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1311 tcg_debug_assert((offset & 3) == 0);
1312 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1313 shift = (offset - 4) & 0xc;
1315 tcg_out_vsldoi(s, ret, ret, ret, shift);
1319 if (ret < TCG_REG_V0) {
1320 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1321 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1326 tcg_debug_assert(ret >= TCG_REG_V0);
1328 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1332 tcg_debug_assert((offset & 7) == 0);
1333 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1335 tcg_out_vsldoi(s, ret, ret, ret, 8);
1339 tcg_debug_assert(ret >= TCG_REG_V0);
1340 tcg_debug_assert((offset & 15) == 0);
1341 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1342 LVX, ret, base, offset);
1345 g_assert_not_reached();
1349 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1350 TCGReg base, intptr_t offset)
1356 if (arg < TCG_REG_V0) {
1357 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1360 if (have_isa_2_07 && have_vsx) {
1361 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1365 tcg_debug_assert((offset & 3) == 0);
1366 shift = (offset - 4) & 0xc;
1368 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1371 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1374 if (arg < TCG_REG_V0) {
1375 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1376 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1381 tcg_debug_assert(arg >= TCG_REG_V0);
1383 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1384 STXSDX, arg, base, offset);
1387 tcg_debug_assert((offset & 7) == 0);
1389 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1392 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1393 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1396 tcg_debug_assert(arg >= TCG_REG_V0);
1397 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1398 STVX, arg, base, offset);
1401 g_assert_not_reached();
1405 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1406 TCGReg base, intptr_t ofs)
1411 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1412 int const_arg2, int cr, TCGType type)
1417 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1419 /* Simplify the comparisons below wrt CMPI. */
1420 if (type == TCG_TYPE_I32) {
1421 arg2 = (int32_t)arg2;
1428 if ((int16_t) arg2 == arg2) {
1432 } else if ((uint16_t) arg2 == arg2) {
1447 if ((int16_t) arg2 == arg2) {
1462 if ((uint16_t) arg2 == arg2) {
1475 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1478 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1481 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1484 tcg_out32(s, op | RA(arg1) | RB(arg2));
1488 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1489 TCGReg dst, TCGReg src)
1491 if (type == TCG_TYPE_I32) {
1492 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1493 tcg_out_shri32(s, dst, dst, 5);
1495 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1496 tcg_out_shri64(s, dst, dst, 6);
1500 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1502 /* X != 0 implies X + -1 generates a carry. Extra addition
1503 trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
1505 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1506 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1508 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1509 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
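/* Concretely: src = 5 gives addic -> 4 with CA = 1, and subfe
   computes 5 + ~4 + 1 = 1; src = 0 gives addic -> -1 with CA = 0,
   and subfe computes 0 + ~(-1) + 0 = 0. */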
1513 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1517 if ((uint32_t)arg2 == arg2) {
1518 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1520 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1521 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1524 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1529 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1530 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1535 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1537 /* Ignore high bits of a potential constant arg2. */
1538 if (type == TCG_TYPE_I32) {
1539 arg2 = (uint32_t)arg2;
1542 /* Handle common and trivial cases before handling anything else. */
1546 tcg_out_setcond_eq0(s, type, arg0, arg1);
1549 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1550 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1553 tcg_out_setcond_ne0(s, arg0, arg1);
1556 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1560 /* Extract the sign bit. */
1561 if (type == TCG_TYPE_I32) {
1562 tcg_out_shri32(s, arg0, arg1, 31);
1564 tcg_out_shri64(s, arg0, arg1, 63);
1572 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1573 All other cases below are also at least 3 insns, so speed up the
1574 code generator by not considering them and always using ISEL. */
1578 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1580 isel = tcg_to_isel[cond];
1582 tcg_out_movi(s, type, arg0, 1);
1584 /* arg0 = (bc ? 0 : 1) */
1585 tab = TAB(arg0, 0, arg0);
1588 /* arg0 = (bc ? 1 : 0) */
1589 tcg_out_movi(s, type, TCG_REG_R0, 0);
1590 tab = TAB(arg0, arg0, TCG_REG_R0);
1592 tcg_out32(s, isel | tab);
1598 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1599 tcg_out_setcond_eq0(s, type, arg0, arg1);
1603 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1604 /* Discard the high bits only once, rather than both inputs. */
1605 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1606 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1609 tcg_out_setcond_ne0(s, arg0, arg1);
1627 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1633 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1635 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1639 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1640 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1648 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1650 if (l->has_value) {
1651 bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1652 } else {
1653 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1654 }
1655 tcg_out32(s, bc);
1658 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1659 TCGArg arg1, TCGArg arg2, int const_arg2,
1660 TCGLabel *l, TCGType type)
1662 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1663 tcg_out_bc(s, tcg_to_bc[cond], l);
1666 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1667 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1668 TCGArg v2, bool const_c2)
1670 /* If for some reason both inputs are zero, don't produce bad code. */
1671 if (v1 == 0 && v2 == 0) {
1672 tcg_out_movi(s, type, dest, 0);
1676 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1679 int isel = tcg_to_isel[cond];
1681 /* Swap the V operands if the operation indicates inversion. */
1688 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1690 tcg_out_movi(s, type, TCG_REG_R0, 0);
1692 tcg_out32(s, isel | TAB(dest, v1, v2));
1695 cond = tcg_invert_cond(cond);
1697 } else if (dest != v1) {
1699 tcg_out_movi(s, type, dest, 0);
1701 tcg_out_mov(s, type, dest, v1);
1704 /* Branch forward over one insn */
1705 tcg_out32(s, tcg_to_bc[cond] | 8);
1706 if (v2 == 0) {
1707 tcg_out_movi(s, type, dest, 0);
1708 } else {
1709 tcg_out_mov(s, type, dest, v2);
1710 }
1714 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1715 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1717 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1718 tcg_out32(s, opc | RA(a0) | RS(a1));
1720 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1721 /* Note that the only other valid constant for a2 is 0. */
1722 if (have_isel) {
1723 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1724 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1725 } else if (!const_a2 && a0 == a2) {
1726 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1727 tcg_out32(s, opc | RA(a0) | RS(a1));
1729 tcg_out32(s, opc | RA(a0) | RS(a1));
1730 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1732 tcg_out_movi(s, type, a0, 0);
1734 tcg_out_mov(s, type, a0, a2);
1740 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1741 const int *const_args)
1743 static const struct { uint8_t bit1, bit2; } bits[] = {
1744 [TCG_COND_LT ] = { CR_LT, CR_LT },
1745 [TCG_COND_LE ] = { CR_LT, CR_GT },
1746 [TCG_COND_GT ] = { CR_GT, CR_GT },
1747 [TCG_COND_GE ] = { CR_GT, CR_LT },
1748 [TCG_COND_LTU] = { CR_LT, CR_LT },
1749 [TCG_COND_LEU] = { CR_LT, CR_GT },
1750 [TCG_COND_GTU] = { CR_GT, CR_GT },
1751 [TCG_COND_GEU] = { CR_GT, CR_LT },
1754 TCGCond cond = args[4], cond2;
1755 TCGArg al, ah, bl, bh;
1756 int blconst, bhconst;
1763 blconst = const_args[2];
1764 bhconst = const_args[3];
1773 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1774 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1775 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1786 bit1 = bits[cond].bit1;
1787 bit2 = bits[cond].bit2;
1788 op = (bit1 != bit2 ? CRANDC : CRAND);
1789 cond2 = tcg_unsigned_cond(cond);
1791 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1792 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1793 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1794 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1802 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1803 const int *const_args)
1805 tcg_out_cmp2(s, args + 1, const_args + 1);
1806 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1807 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1810 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1811 const int *const_args)
1813 tcg_out_cmp2(s, args, const_args);
1814 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1817 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1819 uint32_t insn = HWSYNC;
1821 if (a0 == TCG_MO_LD_LD) {
1822 insn = LWSYNC;
1823 } else if (a0 == TCG_MO_ST_ST) {
1824 insn = EIEIO;
1825 }
1826 tcg_out32(s, insn);
1829 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1830 uintptr_t jmp_rw, uintptr_t addr)
1832 if (TCG_TARGET_REG_BITS == 64) {
1833 tcg_insn_unit i1, i2;
1834 intptr_t tb_diff = addr - tc_ptr;
1835 intptr_t br_diff = addr - (jmp_rx + 4);
1838 /* This does not exercise the full range of the branch, but we do
1839 still need to be able to load the new value of TCG_REG_TB.
1840 This case happens quite often. */
1841 if (tb_diff == (int16_t)tb_diff) {
1842 i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
1843 i2 = B | (br_diff & 0x3fffffc);
1845 intptr_t lo = (int16_t)tb_diff;
1846 intptr_t hi = (int32_t)(tb_diff - lo);
1847 assert(tb_diff == hi + lo);
1848 i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
1849 i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
1851 #ifdef HOST_WORDS_BIGENDIAN
1852 pair = (uint64_t)i1 << 32 | i2;
1854 pair = (uint64_t)i2 << 32 | i1;
1857 /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
1858 within qatomic_set that would fail to build a ppc32 host. */
1859 qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
1860 flush_idcache_range(jmp_rx, jmp_rw, 8);
1862 intptr_t diff = addr - jmp_rx;
1863 tcg_debug_assert(in_range_b(diff));
1864 qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
1865 flush_idcache_range(jmp_rx, jmp_rw, 4);
1869 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
1872 /* Look through the descriptor. If the branch is in range and the
1873 TOC value is cheap to build, load the TOC and branch directly. */
1874 const void *tgt = ((const void * const *)target)[0];
1875 uintptr_t toc = ((const uintptr_t *)target)[1];
1876 intptr_t diff = tcg_pcrel_diff(s, tgt);
1878 if (in_range_b(diff) && toc == (uint32_t)toc) {
1879 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1880 tcg_out_b(s, LK, tgt);
1882 /* Fold the low bits of the constant into the addresses below. */
1883 intptr_t arg = (intptr_t)target;
1884 int ofs = (int16_t)arg;
1886 if (ofs + 8 < 0x8000) {
1891 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1892 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1893 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1894 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1895 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1897 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1900 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1901 address, which the callee uses to compute its TOC address. */
1902 /* FIXME: when the branch is in range, we could avoid r12 load if we
1903 knew that the destination uses the same TOC, and what its local
1904 entry point offset is. */
1905 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1907 diff = tcg_pcrel_diff(s, target);
1908 if (in_range_b(diff)) {
1909 tcg_out_b(s, LK, target);
1911 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1912 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1915 tcg_out_b(s, LK, target);
1919 static const uint32_t qemu_ldx_opc[16] = {
1926 [MO_BSWAP | MO_UB] = LBZX,
1927 [MO_BSWAP | MO_UW] = LHBRX,
1928 [MO_BSWAP | MO_UL] = LWBRX,
1929 [MO_BSWAP | MO_Q] = LDBRX,
1932 static const uint32_t qemu_stx_opc[16] = {
1937 [MO_BSWAP | MO_UB] = STBX,
1938 [MO_BSWAP | MO_UW] = STHBRX,
1939 [MO_BSWAP | MO_UL] = STWBRX,
1940 [MO_BSWAP | MO_Q] = STDBRX,
1943 static const uint32_t qemu_exts_opc[4] = {
1944 EXTSB, EXTSH, EXTSW, 0
1947 #if defined (CONFIG_SOFTMMU)
1948 #include "../tcg-ldst.c.inc"
1950 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1951 * int mmu_idx, uintptr_t ra)
1953 static void * const qemu_ld_helpers[16] = {
1954 [MO_UB] = helper_ret_ldub_mmu,
1955 [MO_LEUW] = helper_le_lduw_mmu,
1956 [MO_LEUL] = helper_le_ldul_mmu,
1957 [MO_LEQ] = helper_le_ldq_mmu,
1958 [MO_BEUW] = helper_be_lduw_mmu,
1959 [MO_BEUL] = helper_be_ldul_mmu,
1960 [MO_BEQ] = helper_be_ldq_mmu,
1963 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1964 * uintxx_t val, int mmu_idx, uintptr_t ra)
1966 static void * const qemu_st_helpers[16] = {
1967 [MO_UB] = helper_ret_stb_mmu,
1968 [MO_LEUW] = helper_le_stw_mmu,
1969 [MO_LEUL] = helper_le_stl_mmu,
1970 [MO_LEQ] = helper_le_stq_mmu,
1971 [MO_BEUW] = helper_be_stw_mmu,
1972 [MO_BEUL] = helper_be_stl_mmu,
1973 [MO_BEQ] = helper_be_stq_mmu,
1976 /* We expect to use a 16-bit negative offset from ENV. */
1977 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1978 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1980 /* Perform the TLB load and compare. Places the result of the comparison
1981 in CR7, loads the addend of the TLB into R3, and returns the register
1982 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1984 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1985 TCGReg addrlo, TCGReg addrhi,
1986 int mem_index, bool is_read)
1990 ? offsetof(CPUTLBEntry, addr_read)
1991 : offsetof(CPUTLBEntry, addr_write));
1992 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1993 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1994 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1995 unsigned s_bits = opc & MO_SIZE;
1996 unsigned a_bits = get_alignment_bits(opc);
1998 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1999 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
2000 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
2002 /* Extract the page index, shifted into place for tlb index. */
2003 if (TCG_TARGET_REG_BITS == 32) {
2004 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
2005 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2007 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
2008 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2010 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
2012 /* Load the TLB comparator. */
2013 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2014 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
2016 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
2018 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
2019 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2020 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
2021 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
2023 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
2027 /* Load the TLB addend for use on the fast path. Do this asap
2028 to minimize any load use delay. */
2029 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
2030 offsetof(CPUTLBEntry, addend));
2032 /* Clear the non-page, non-alignment bits from the address */
2033 if (TCG_TARGET_REG_BITS == 32) {
2034 /* We don't support unaligned accesses on 32-bit hosts.
2035 * Preserve the bottom bits and thus trigger a comparison
2036 * failure on unaligned accesses.
2038 if (a_bits < s_bits) {
2041 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2042 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2046 /* If the access is unaligned, we need to make sure we fail if we
2047 * cross a page boundary. The trick is to add the access size-1
2048 * to the address before masking the low bits. That will make the
2049 * address overflow to the next page if we cross a page boundary,
2050 * which will then force a mismatch of the TLB compare.
2052 if (a_bits < s_bits) {
2053 unsigned a_mask = (1 << a_bits) - 1;
2054 unsigned s_mask = (1 << s_bits) - 1;
2055 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2059 /* Mask the address for the requested alignment. */
2060 if (TARGET_LONG_BITS == 32) {
2061 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2062 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2063 /* Zero-extend the address for use in the final address. */
2064 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
2065 addrlo = TCG_REG_R4;
2066 } else if (a_bits == 0) {
2067 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
2069 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2070 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
2071 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
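/* The rotate pair above clears page-offset bits [a_bits,
   TARGET_PAGE_BITS) while keeping the low a_bits intact, so a
   misaligned address can never match the TLB comparator. */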
2075 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2076 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
2077 0, 7, TCG_TYPE_I32);
2078 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
2079 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2081 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
2088 /* Record the context of a call to the out of line helper code for the slow
2089 path for a load or store, so that we can later generate the correct
2090 helper code. */
2091 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
2092 TCGReg datalo_reg, TCGReg datahi_reg,
2093 TCGReg addrlo_reg, TCGReg addrhi_reg,
2094 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
2096 TCGLabelQemuLdst *label = new_ldst_label(s);
2098 label->is_ld = is_ld;
2100 label->datalo_reg = datalo_reg;
2101 label->datahi_reg = datahi_reg;
2102 label->addrlo_reg = addrlo_reg;
2103 label->addrhi_reg = addrhi_reg;
2104 label->raddr = tcg_splitwx_to_rx(raddr);
2105 label->label_ptr[0] = lptr;
2108 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2110 TCGMemOpIdx oi = lb->oi;
2111 MemOp opc = get_memop(oi);
2112 TCGReg hi, lo, arg = TCG_REG_R3;
2114 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2118 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2120 lo = lb->addrlo_reg;
2121 hi = lb->addrhi_reg;
2122 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2123 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2126 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2127 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2129 /* If the address needed to be zero-extended, we'll have already
2130 placed it in R4. The only remaining case is 64-bit guest. */
2131 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2134 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2135 tcg_out32(s, MFSPR | RT(arg) | LR);
2137 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2139 lo = lb->datalo_reg;
2140 hi = lb->datahi_reg;
2141 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2142 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
2143 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
2144 } else if (opc & MO_SIGN) {
2145 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
2146 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
2148 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
2151 tcg_out_b(s, 0, lb->raddr);
2155 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2157 TCGMemOpIdx oi = lb->oi;
2158 MemOp opc = get_memop(oi);
2159 MemOp s_bits = opc & MO_SIZE;
2160 TCGReg hi, lo, arg = TCG_REG_R3;
2162 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2166 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2168 lo = lb->addrlo_reg;
2169 hi = lb->addrhi_reg;
2170 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2171 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2174 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2175 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2177 /* If the address needed to be zero-extended, we'll have already
2178 placed it in R4. The only remaining case is 64-bit guest. */
2179 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2182 lo = lb->datalo_reg;
2183 hi = lb->datahi_reg;
2184 if (TCG_TARGET_REG_BITS == 32) {
2187 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2190 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2193 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2196 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
2200 if (s_bits == MO_64) {
2201 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
2203 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
2207 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2208 tcg_out32(s, MFSPR | RT(arg) | LR);
2210 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2212 tcg_out_b(s, 0, lb->raddr);
2215 #endif /* SOFTMMU */
2217 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2219 TCGReg datalo, datahi, addrlo, rbase;
2220 TCGReg addrhi __attribute__((unused));
2223 #ifdef CONFIG_SOFTMMU
2225 tcg_insn_unit *label_ptr;
2229 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2231 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2233 opc = get_memop(oi);
2234 s_bits = opc & MO_SIZE;
2236 #ifdef CONFIG_SOFTMMU
2237 mem_index = get_mmuidx(oi);
2238 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2240 /* Load a pointer into the current opcode w/conditional branch-link. */
2241 label_ptr = s->code_ptr;
2242 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2245 #else /* !CONFIG_SOFTMMU */
2246 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2247 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2248 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2249 addrlo = TCG_REG_TMP1;
2253 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2254 if (opc & MO_BSWAP) {
2255 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2256 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2257 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
2258 } else if (rbase != 0) {
2259 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2260 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
2261 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
2262 } else if (addrlo == datahi) {
2263 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2264 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2266 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2267 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2270 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2271 if (!have_isa_2_06 && insn == LDBRX) {
2272 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2273 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2274 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2275 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2277 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2279 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2280 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2281 insn = qemu_exts_opc[s_bits];
2282 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2286 #ifdef CONFIG_SOFTMMU
2287 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2288 s->code_ptr, label_ptr);
2292 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2294 TCGReg datalo, datahi, addrlo, rbase;
2295 TCGReg addrhi __attribute__((unused));
2298 #ifdef CONFIG_SOFTMMU
2300 tcg_insn_unit *label_ptr;
2304 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2306 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2308 opc = get_memop(oi);
2309 s_bits = opc & MO_SIZE;
2311 #ifdef CONFIG_SOFTMMU
2312 mem_index = get_mmuidx(oi);
2313 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2315 /* Load a pointer into the current opcode w/conditional branch-link. */
2316 label_ptr = s->code_ptr;
2317 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2320 #else /* !CONFIG_SOFTMMU */
2321 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2322 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2323 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2324 addrlo = TCG_REG_TMP1;
2328 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2329 if (opc & MO_BSWAP) {
2330 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2331 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2332 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2333 } else if (rbase != 0) {
2334 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2335 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2336 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2338 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2339 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2342 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2343 if (!have_isa_2_06 && insn == STDBRX) {
2344 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2345 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2346 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2347 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2349 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2353 #ifdef CONFIG_SOFTMMU
2354 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2355 s->code_ptr, label_ptr);
2359 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2361 int i;
2362 for (i = 0; i < count; ++i) {
2363 p[i] = NOP;
2364 }
2367 /* Parameters for function call generation, used in tcg.c. */
2368 #define TCG_TARGET_STACK_ALIGN 16
2369 #define TCG_TARGET_EXTEND_ARGS 1
2372 # define LINK_AREA_SIZE (6 * SZR)
2373 # define LR_OFFSET (1 * SZR)
2374 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2375 #elif defined(TCG_TARGET_CALL_DARWIN)
2376 # define LINK_AREA_SIZE (6 * SZR)
2377 # define LR_OFFSET (2 * SZR)
2378 #elif TCG_TARGET_REG_BITS == 64
2379 # if defined(_CALL_ELF) && _CALL_ELF == 2
2380 # define LINK_AREA_SIZE (4 * SZR)
2381 # define LR_OFFSET (1 * SZR)
2383 #else /* TCG_TARGET_REG_BITS == 32 */
2384 # if defined(_CALL_SYSV)
2385 # define LINK_AREA_SIZE (2 * SZR)
2386 # define LR_OFFSET (1 * SZR)
2390 # error "Unhandled abi"
2392 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2393 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
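/*
 * An editor's sketch of the resulting frame, derived from the macros
 * above (offsets relative to the updated r1):
 *
 *   FRAME_SIZE + LR_OFFSET            saved LR, in the caller's link area
 *   FRAME_SIZE                        caller's frame (back chain)
 *   REG_SAVE_BOT .. FRAME_SIZE - 1    callee-saved GPRs (REG_SAVE_SIZE bytes)
 *   REG_SAVE_BOT - CPU_TEMP_BUF_SIZE  TCG spill/temp buffer (tcg_set_frame)
 *   TCG_TARGET_CALL_STACK_OFFSET      outgoing stack arguments
 *   0                                 our link area for callees
 */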
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    const void **desc = (const void **)s->code_ptr;
    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
    desc[1] = 0;                            /* environment pointer */
    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
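/*
 * For orientation (editor's note): the buffer emitted above is entered
 * with the CPU env pointer in the first argument register, which the
 * prologue moves to TCG_AREG0, and the TB code pointer in the second,
 * which is moved to CTR (and to TCG_REG_TB when USE_REG_TB) before the
 * BCCTR branches into generated code.
 */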
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tcg_code_gen_epilogue);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8-byte aligned. */
                if ((uintptr_t)s->code_ptr & 7) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump: load the target from the jump table. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB.  */
            tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
                             -tcg_current_code_size(s));
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
                                       l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
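    /*
     * Editor's note: the ADDIS/ADDI pair emitted for the 64-bit direct
     * goto_tb above is a patchable slot; (un)linking a TB rewrites the
     * two immediates with the displacement to the destination.  The NOP
     * padding keeps the pair 8-byte aligned so both instructions can be
     * replaced by a single atomic 64-bit store.
     */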
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* fall through */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* fall through */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari32(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            tcg_out_sari64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_bswap16(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0], args[1]);
        break;
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
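    /*
     * Worked example for the constant case above, which is only used to
     * deposit zero (constraint rZ): with args[3] = 8 and args[4] = 16,
     * mask = ((2u << 15) - 1) << 8 = 0x00ffff00, so the andi clears
     * exactly the 16-bit field starting at bit 8.
     */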
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
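    /*
     * In both expansions above, the low-part result is redirected to the
     * scratch R0 whenever args[0] would clobber a high-part input that is
     * still needed; the trailing tcg_out_mov then places the low word in
     * its real destination.
     */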
    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
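/*
 * Returns follow the usual TCG backend convention: 1 means the operation
 * is directly supported for this element size, 0 means not supported at
 * all, and -1 means tcg_expand_vec_op() below can synthesize it from
 * other vector operations.
 */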
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00.  */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup.  */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#ifndef HOST_WORDS_BIGENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;
    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx  */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;

    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1;

    if (vece == MO_32) {
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using negative numbers gets us the 4th bit easily.
         */
        imm = sextract32(imm, 0, 5);
    } else {
        imm &= (8 << vece) - 1;
    }

    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
    t1 = tcg_constant_vec(type, MO_8, imm);
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec c0, c16;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using -16 is a quick way to represent 16.
         */
        c16 = tcg_constant_vec(type, MO_8, -16);
        c0 = tcg_constant_vec(type, MO_8, 0);

        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
        tcg_gen_add_vec(MO_32, v0, t1, t2);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
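/*
 * Why the MO_32 path works (an editor's note): write each 32-bit lane of
 * the operands as a*2^16 + b and c*2^16 + d.  vmulouh yields b*d; rotating
 * v2 by 16 and feeding it to vmsumuhm yields a*d + b*c per lane, which
 * shifted left by 16 is the middle partial product, so the final add
 * produces the low 32 bits of the full product.
 */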
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        /* Rotate right by N is rotate left by -N (mod element size). */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rT);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rZW);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, ri, rZ, rZ);
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, ri, ri);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, ri, ri);
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rI, rZM);
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rZM, r, r);
    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O1_I1(r, L)
                : C_O1_I2(r, L, L));

    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O0_I2(S, S)
                : C_O0_I3(S, S, S));

    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
                : C_O2_I2(L, L, L, L));

    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
                : C_O0_I4(S, S, S, S));
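    /*
     * Editor's note: the L and S classes above are this backend's
     * qemu_ld/qemu_st register constraints, i.e. general registers minus
     * those the softmmu slow path needs (R3-R5 for loads, R3-R6 for
     * stores), so helper-call arguments cannot clobber the operands.
     */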
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return C_O1_I2(v, v, v);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(v, v);

    case INDEX_op_dup_vec:
        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);

    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
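/*
 * Two uleb128 bytes carry 14 payload bits (7 per byte), so the assertion
 * above is exactly what guarantees FRAME_SIZE fits the encoding used in
 * fde_def_cfa below.
 */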
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),      /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */