/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-pool.c.inc"
#include "../tcg-ldst.c.inc"
/*
 * Standardize on the _CALL_FOO symbols used by GCC:
 * Apple XCode does not define _CALL_DARWIN.
 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV or _CALL_AIX.
 */
#if TCG_TARGET_REG_BITS == 64
# if defined(_CALL_AIX)
    /* ok */
# elif defined(_CALL_ELF) && _CALL_ELF == 1
#  define _CALL_AIX
# elif defined(_CALL_ELF) && _CALL_ELF == 2
    /* ok */
# else
#  error "Unknown ABI"
# endif
#else
# if defined(_CALL_SYSV) || defined(_CALL_DARWIN)
    /* ok */
# elif defined(__APPLE__)
#  define _CALL_DARWIN
# elif defined(__ELF__)
#  define _CALL_SYSV
# else
#  error "Unknown ABI"
# endif
#endif
#if TCG_TARGET_REG_BITS == 64
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_NORMAL
#else
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_BY_REF
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_BY_REF
#else
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_NORMAL
#endif
/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif
#define TCG_REG_TMP2    TCG_REG_R11

#define TCG_VEC_TMP1    TCG_REG_V0
#define TCG_VEC_TMP2    TCG_REG_V1

#define TCG_REG_TB      TCG_REG_R31
#define USE_REG_TB      (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00)
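
/*
 * Without ISA v3.00 pc-relative addressing, dedicate r31 to holding the
 * address of the current TB, so that in-TB data can be reached with a
 * small signed offset; compare ppc_tbrel_diff() below.
 */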
/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)

#define TCG_CT_CONST_S16   0x100
#define TCG_CT_CONST_U16   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U32   0x800
#define TCG_CT_CONST_ZERO  0x1000
#define TCG_CT_CONST_MONE  0x2000
#define TCG_CT_CONST_WSZ   0x4000
#define TCG_CT_CONST_CMP   0x8000

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull
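
/*
 * R_PPC64_PCREL34 names the 34-bit displacement relocation of ISA v3.1
 * prefixed instructions; define it locally for toolchains whose ELF
 * headers predate it.
 */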
#ifndef R_PPC64_PCREL34
#define R_PPC64_PCREL34  132
#endif

#define have_isel  (cpuinfo & CPUINFO_ISEL)

#define TCG_GUEST_BASE_REG  TCG_REG_R30
#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R11,
    TCG_REG_R2,
    TCG_REG_R13,
    TCG_REG_R10,  /* call clobbered, arguments */
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,

    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
    TCG_REG_V2,   /* call clobbered, vectors */
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V8,
    TCG_REG_V9,
    TCG_REG_V10,
    TCG_REG_V11,
    TCG_REG_V12,
    TCG_REG_V13,
    TCG_REG_V14,
    TCG_REG_V15,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_R3 + slot;
}
static const int tcg_target_callee_save_regs[] = {
#ifdef _CALL_DARWIN
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27, /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};
/* For PPC, we use TB+4 instead of TB as the base.  */
static inline ptrdiff_t ppc_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_tbrel_diff(s, target) - 4;
}
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}
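
/*
 * I-form branches carry a 24-bit word displacement, i.e. a signed 26-bit
 * byte displacement once shifted left by two: a reach of about +/- 32MB
 * around the branch instruction itself.
 */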
static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(in_range_b(disp));
    return disp & 0x3fffffc;
}
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (in_range_b(disp)) {
        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
        return true;
    }
    return false;
}
static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}
static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (disp == (int16_t) disp) {
        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
        return true;
    }
    return false;
}
static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (disp == sextract64(disp, 0, 34)) {
        src_rw[0] = (src_rw[0] & ~0x3ffff) | ((disp >> 16) & 0x3ffff);
        src_rw[1] = (src_rw[1] & ~0xffff) | (disp & 0xffff);
        return true;
    }
    return false;
}
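
/*
 * A prefixed insn is a pair of words: the prefix holds the high 18 bits
 * of the 34-bit displacement and the suffix holds the low 16 bits, which
 * is exactly how reloc_pc34() patches them above.
 */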
static bool mask_operand(uint32_t c, int *mb, int *me);
static bool mask64_operand(uint64_t c, int *mb, int *me);
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t sval, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    uint64_t uval = sval;
    int mb, me;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    if (ct & TCG_CT_CONST_CMP) {
        switch (cond) {
        case TCG_COND_EQ:
        case TCG_COND_NE:
            ct |= TCG_CT_CONST_S16 | TCG_CT_CONST_U16;
            break;
        case TCG_COND_LT:
        case TCG_COND_GE:
        case TCG_COND_LE:
        case TCG_COND_GT:
            ct |= TCG_CT_CONST_S16;
            break;
        case TCG_COND_LTU:
        case TCG_COND_GEU:
        case TCG_COND_LEU:
        case TCG_COND_GTU:
            ct |= TCG_CT_CONST_U16;
            break;
        case TCG_COND_TSTEQ:
        case TCG_COND_TSTNE:
            if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) {
                return 1;
            }
            if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32
                ? mask_operand(uval, &mb, &me)
                : mask64_operand(uval << clz64(uval), &mb, &me)) {
                return 1;
            }
            return 0;
        default:
            g_assert_not_reached();
        }
    }

    if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && uval == (uint32_t)uval) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && sval == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_MONE) && sval == -1) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_WSZ) && sval == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
#define OPCD(opc)  ((opc)<<26)
#define XO19(opc)  (OPCD(19)|((opc)<<1))
#define MD30(opc)  (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc)  (OPCD(31)|((opc)<<1))
#define XO58(opc)  (OPCD(58)|(opc))
#define XO62(opc)  (OPCD(62)|(opc))
#define VX4(opc)   (OPCD(4)|(opc))
#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LK     1

#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define LWZUX  XO31( 55)
#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LDU    XO58(  1)
#define LDUX   XO31( 53)
#define LWA    XO58(  2)
#define LWAX   XO31(341)

#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)
#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define PLWA   OPCD( 41)
#define PLD    OPCD( 57)
#define PLXSD  OPCD( 42)
#define PLXV   OPCD(25 * 2 + 1)  /* force tx=1 */

#define PSTD   OPCD( 61)
#define PSTXSD OPCD( 46)
#define PSTXV  OPCD(27 * 2 + 1)  /* force sx=1 */

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)
#define ADDPCIS XO19( 2)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR     XO31(444)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define MODSW  XO31(779)
#define MODUW  XO31(267)
#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define CNTTZW XO31(538)
#define CNTTZD XO31(570)
#define CNTPOPW XO31(378)
#define CNTPOPD XO31(506)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)
#define MODSD  XO31(777)
#define MODUD  XO31(265)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define EIEIO  XO31(854)
#define HWSYNC XO31(598)
#define LWSYNC (HWSYNC | (1u << 21))

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)
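
/*
 * The SPR field stores the two 5-bit halves of an SPR number in swapped
 * order, which SPR(a, b) composes directly: LR is SPR 8, CTR is SPR 9.
 */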
#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define BRH    XO31(219)
#define BRW    XO31(155)
#define BRD    XO31(187)

#define TW     XO31(  4)
#define TRAP   (TW | TO(31))

#define SETBC    XO31(384)  /* v3.10 */
#define SETBCR   XO31(416)  /* v3.10 */
#define SETNBC   XO31(448)  /* v3.10 */
#define SETNBCR  XO31(480)  /* v3.10 */

#define NOP    ORI  /* ori 0,0,0 */

#define LVX        XO31(103)
#define LVEBX      XO31(7)
#define LVEHX      XO31(39)
#define LVEWX      XO31(71)
#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
#define LXSD       (OPCD(57) | 2)   /* v3.00 */
#define LXVWSX     (XO31(364) | 1)  /* v3.00, force tx=1 */

#define STVX       XO31(231)
#define STVEWX     XO31(199)
#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
#define STXV       (OPCD(61) | 8 | 5)  /* v3.00, force sx=1 */
#define STXSD      (OPCD(61) | 2)   /* v3.00 */

#define VADDSBS    VX4(768)
#define VADDUBS    VX4(512)
#define VADDUBM    VX4(0)
#define VADDSHS    VX4(832)
#define VADDUHS    VX4(576)
#define VADDUHM    VX4(64)
#define VADDSWS    VX4(896)
#define VADDUWS    VX4(640)
#define VADDUWM    VX4(128)
#define VADDUDM    VX4(192)      /* v2.07 */

#define VSUBSBS    VX4(1792)
#define VSUBUBS    VX4(1536)
#define VSUBUBM    VX4(1024)
#define VSUBSHS    VX4(1856)
#define VSUBUHS    VX4(1600)
#define VSUBUHM    VX4(1088)
#define VSUBSWS    VX4(1920)
#define VSUBUWS    VX4(1664)
#define VSUBUWM    VX4(1152)
#define VSUBUDM    VX4(1216)     /* v2.07 */

#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */

#define VMAXSB     VX4(258)
#define VMAXSH     VX4(322)
#define VMAXSW     VX4(386)
#define VMAXSD     VX4(450)      /* v2.07 */
#define VMAXUB     VX4(2)
#define VMAXUH     VX4(66)
#define VMAXUW     VX4(130)
#define VMAXUD     VX4(194)      /* v2.07 */
#define VMINSB     VX4(770)
#define VMINSH     VX4(834)
#define VMINSW     VX4(898)
#define VMINSD     VX4(962)      /* v2.07 */
#define VMINUB     VX4(514)
#define VMINUH     VX4(578)
#define VMINUW     VX4(642)
#define VMINUD     VX4(706)      /* v2.07 */

#define VCMPEQUB   VX4(6)
#define VCMPEQUH   VX4(70)
#define VCMPEQUW   VX4(134)
#define VCMPEQUD   VX4(199)      /* v2.07 */
#define VCMPGTSB   VX4(774)
#define VCMPGTSH   VX4(838)
#define VCMPGTSW   VX4(902)
#define VCMPGTSD   VX4(967)      /* v2.07 */
#define VCMPGTUB   VX4(518)
#define VCMPGTUH   VX4(582)
#define VCMPGTUW   VX4(646)
#define VCMPGTUD   VX4(711)      /* v2.07 */
#define VCMPNEB    VX4(7)        /* v3.00 */
#define VCMPNEH    VX4(71)       /* v3.00 */
#define VCMPNEW    VX4(135)      /* v3.00 */

#define VSLB       VX4(260)
#define VSLH       VX4(324)
#define VSLW       VX4(388)
#define VSLD       VX4(1476)     /* v2.07 */
#define VSRB       VX4(516)
#define VSRH       VX4(580)
#define VSRW       VX4(644)
#define VSRD       VX4(1732)     /* v2.07 */
#define VSRAB      VX4(772)
#define VSRAH      VX4(836)
#define VSRAW      VX4(900)
#define VSRAD      VX4(964)      /* v2.07 */

#define VRLW       VX4(132)
#define VRLD       VX4(196)      /* v2.07 */

#define VMULEUB    VX4(520)
#define VMULEUH    VX4(584)
#define VMULEUW    VX4(648)      /* v2.07 */
#define VMULOUB    VX4(8)
#define VMULOUH    VX4(72)
#define VMULOUW    VX4(136)      /* v2.07 */
#define VMULUWM    VX4(137)      /* v2.07 */
#define VMULLD     VX4(457)      /* v3.10 */
#define VMSUMUHM   VX4(38)

#define VMRGHB     VX4(12)
#define VMRGHH     VX4(76)
#define VMRGHW     VX4(140)
#define VMRGLB     VX4(268)
#define VMRGLH     VX4(332)
#define VMRGLW     VX4(396)

#define VPKUHUM    VX4(14)
#define VPKUWUM    VX4(78)

#define VAND       VX4(1028)
#define VANDC      VX4(1092)
#define VNOR       VX4(1284)
#define VOR        VX4(1156)
#define VXOR       VX4(1220)
#define VEQV       VX4(1668)     /* v2.07 */
#define VNAND      VX4(1412)     /* v2.07 */
#define VORC       VX4(1348)     /* v2.07 */

#define VSPLTB     VX4(524)
#define VSPLTH     VX4(588)
#define VSPLTW     VX4(652)
#define VSPLTISB   VX4(780)
#define VSPLTISH   VX4(844)
#define VSPLTISW   VX4(908)

#define VSLDOI     VX4(44)

#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */

#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */

#define RT(r)   ((r)<<21)
#define RS(r)   ((r)<<21)
#define RA(r)   ((r)<<16)
#define RB(r)   ((r)<<11)
#define TO(t)   ((t)<<21)
#define SH(s)   ((s)<<11)
#define MB(b)   ((b)<<6)
#define ME(e)   ((e)<<1)
#define BO(o)   ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b)  (1 << (19 - (b)))

#define VRT(r)  (((r) & 31) << 21)
#define VRA(r)  (((r) & 31) << 16)
#define VRB(r)  (((r) & 31) << 11)
#define VRC(r)  (((r) & 31) <<  6)

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)     ((n)<<23)
#define BI(n, c)  (((c)+((n)*4))<<16)
#define BT(n, c)  (((c)+((n)*4))<<21)
#define BA(n, c)  (((c)+((n)*4))<<16)
#define BB(n, c)  (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)
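
/*
 * In the BO field, 12 branches when the CR bit selected by BI is set,
 * 4 when it is clear, and 20 branches unconditionally; BI selects bit
 * 4*cr + cond of the condition register.
 */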

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};

static const uint32_t tcg_to_bc[16] = {
    [TCG_COND_EQ]  = BC | BI(0, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(0, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_TSTEQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_TSTNE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(0, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(0, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(0, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(0, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(0, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(0, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(0, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(0, CR_GT) | BO_COND_TRUE,
};
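
/*
 * TSTEQ/TSTNE reuse the EQ/NE encodings: tcg_out_cmp() lowers them to a
 * record-form AND, which leaves its zero/non-zero result in CR_EQ of cr0.
 */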

/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[16] = {
    [TCG_COND_EQ]  = ISEL | BC_(0, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(0, CR_EQ) | 1,
    [TCG_COND_TSTEQ] = ISEL | BC_(0, CR_EQ),
    [TCG_COND_TSTNE] = ISEL | BC_(0, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(0, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(0, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(0, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(0, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(0, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(0, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(0, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(0, CR_GT),
};
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *target;
    int16_t lo;
    int32_t hi;

    value += addend;
    target = (const tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        return reloc_pc14(code_ptr, target);
    case R_PPC_REL24:
        return reloc_pc24(code_ptr, target);
    case R_PPC64_PCREL34:
        return reloc_pc34(code_ptr, target);
    case R_PPC_ADDR16:
        /*
         * We are (slightly) abusing this relocation type.  In particular,
         * assert that the low 2 bits are zero, and do not modify them.
         * That way we can use this with LD et al that have opcode bits
         * in the low 2 bits of the insn.
         */
        if ((value & 3) || value != (int16_t)value) {
            return false;
        }
        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
        break;
    case R_PPC_ADDR32:
        /*
         * We are abusing this relocation type.  Again, this points to
         * a pair of insns, lis + load.  This is an absolute address
         * relocation for PPC32 so the lis cannot be removed.
         */
        lo = value;
        hi = value - lo;
        if (hi + lo != value) {
            return false;
        }
        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Ensure that the prefixed instruction does not cross a 64-byte boundary. */
static bool tcg_out_need_prefix_align(TCGContext *s)
{
    return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c;
}
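
/*
 * A prefixed (8-byte) insn must not cross a 64-byte boundary, so its
 * prefix may not start at byte 60 of a line; tcg_out_prefix_align()
 * below emits a NOP to skip that slot.
 */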
static void tcg_out_prefix_align(TCGContext *s)
{
    if (tcg_out_need_prefix_align(s)) {
        tcg_out32(s, NOP);
    }
}

static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target)
{
    return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
}

/* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */
static void tcg_out_8ls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
                          unsigned ra, tcg_target_long imm, bool r)
{
    tcg_insn_unit p, i;

    p = OPCD(1) | (r << 20) | ((imm >> 16) & 0x3ffff);
    i = opc | TAI(rt, ra, imm);

    tcg_out_prefix_align(s);
    tcg_out32(s, p);
    tcg_out32(s, i);
}

/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
                          unsigned ra, tcg_target_long imm, bool r)
{
    tcg_insn_unit p, i;

    p = OPCD(1) | (2 << 24) | (r << 20) | ((imm >> 16) & 0x3ffff);
    i = opc | TAI(rt, ra, imm);

    tcg_out_prefix_align(s);
    tcg_out32(s, p);
    tcg_out32(s, i);
}

static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                tcg_out32(s, OR | SAB(arg, ret, arg));
                break;
            } else if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
                          | VRT(arg) | RA(ret));
                break;
            } else {
                /* Altivec does not support vector->integer moves.  */
                return false;
            }
        } else if (arg < TCG_REG_V0) {
            if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
                          | VRT(ret) | RA(arg));
                break;
            } else {
                /* Altivec does not support integer->vector moves.  */
                return false;
            }
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                           int sh, int mb, bool rc)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc);
}

static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                        int sh, int mb)
{
    tcg_out_rld_rc(s, op, ra, rs, sh, mb, false);
}

static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                           int sh, int mb, int me, bool rc)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
}

static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                        int sh, int mb, int me)
{
    tcg_out_rlw_rc(s, op, ra, rs, sh, mb, me, false);
}
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSB | RA(dst) | RS(src));
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSH | RA(dst) | RS(src));
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out32(s, EXTSW | RA(dst) | RS(src));
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_ext32s(s, dst, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_ext32u(s, dst, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    /* Limit immediate shift count lest we create an illegal insn.  */
    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}

static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
}
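
/*
 * SRADI keeps the low five bits of the shift count in the usual SH field
 * and stores bit 5 in insn bit 1, hence the ((c >> 4) & 2) term above.
 */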
static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
{
    uint32_t d0, d1, d2;

    tcg_debug_assert((imm & 0xffff) == 0);
    tcg_debug_assert(imm == (int32_t)imm);

    d2 = extract32(imm, 16, 1);
    d1 = extract32(imm, 17, 5);
    d0 = extract32(imm, 22, 10);
    tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
}
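
/*
 * ADDPCIS is a DX-form insn: its 16-bit immediate is scattered across
 * three fields (d0, d1, d2), which the extract32() calls above pick out
 * of the high half of the 32-bit displacement.
 */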
static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRH | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, dst, dst);
        }
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                              src = xxxxabcd
     */
    /* tmp = rol32(src, 24) & 0x000000ff             = 0000000c */
    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)     = 000000dc */
    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}
static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRW | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, dst, dst);
        }
        return;
    }

    /*
     * Stolen from gcc's builtin_bswap32.
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                              src = xxxxabcd
     */
    /* tmp = rol32(src, 8) & 0xffffffff             = 0000bcda */
    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)   = 0000dcda */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)   = 0000dcba */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext32s(s, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}
static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
{
    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
    TCGReg t1 = dst == src ? dst : TCG_REG_R0;

    if (have_isa_3_10) {
        tcg_out32(s, BRD | RA(dst) | RS(src));
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                              src = abcdefgh
     */
    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);

    /* t0 = rol64(t0, 32)                           = hgfe0000 */
    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
    /* t1 = rol64(src, 32)                          = efghabcd */
    tcg_out_rld(s, RLDICL, t1, src, 32, 0);

    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);

    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
}
/* Emit a move into ret of arg, if it can be done in one insn.  */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        return true;
    }
    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        return true;
    }
    return false;
}
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    intptr_t tb_diff;
    tcg_target_long tmp;
    int shift;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        arg = (int32_t)arg;
    }

    /* Load 16-bit immediates with one insn. */
    if (tcg_out_movi_one(s, ret, arg)) {
        return;
    }

    /* Load addresses within the TB with one insn. */
    tb_diff = ppc_tbrel_diff(s, (void *)arg);
    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
        return;
    }

    /*
     * Load values up to 34 bits, and pc-relative addresses,
     * with one prefixed insn.
     */
    if (have_isa_3_10) {
        if (arg == sextract64(arg, 0, 34)) {
            /* pli ret,value = paddi ret,0,value,0 */
            tcg_out_mls_d(s, ADDI, ret, 0, arg, 0);
            return;
        }

        tmp = tcg_pcrel_diff_for_prefix(s, (void *)arg);
        if (tmp == sextract64(tmp, 0, 34)) {
            /* pla ret,value = paddi ret,0,value,1 */
            tcg_out_mls_d(s, ADDI, ret, 0, tmp, 1);
            return;
        }
    }

    /* Load 32-bit immediates with two insns.  Note that we've already
       eliminated bare ADDIS, so we know both insns are required.  */
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
        return;
    }
    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        return;
    }

    /* Load masked 16-bit value. */
    if (arg > 0 && (arg & 0x8000)) {
        tmp = arg | 0x7fff;
        if ((tmp & (tmp + 1)) == 0) {
            int mb = clz64(tmp + 1) + 1;
            tcg_out32(s, ADDI | TAI(ret, 0, arg));
            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
            return;
        }
    }

    /* Load common masks with 2 insns. */
    shift = ctz64(arg);
    tmp = arg >> shift;
    if (tmp == (int16_t)tmp) {
        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
        tcg_out_shli64(s, ret, ret, shift);
        return;
    }
    shift = clz64(arg);
    if (tcg_out_movi_one(s, ret, arg << shift)) {
        tcg_out_shri64(s, ret, ret, shift);
        return;
    }
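
    /*
     * e.g. 0x7fff000000000000 is handled above as "li ret,0x7fff"
     * followed by a left shift of 48, rather than a longer sequence.
     */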
    /* Load addresses within 2GB with 2 insns. */
    if (have_isa_3_00) {
        intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4;
        int16_t lo = hi;

        hi -= lo;
        if (hi == (int32_t)hi) {
            tcg_out_addpcis(s, TCG_REG_TMP2, hi);
            tcg_out32(s, ADDI | TAI(ret, TCG_REG_TMP2, lo));
            return;
        }
    }

    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
                       ppc_tbrel_diff(s, NULL));
        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
        return;
    }
    if (have_isa_3_10) {
        tcg_out_8ls_d(s, PLD, ret, 0, 0, 1);
        new_pool_label(s, arg, R_PPC64_PCREL34, s->code_ptr - 2, 0);
        return;
    }
    if (have_isa_3_00) {
        tcg_out_addpcis(s, TCG_REG_TMP2, 0);
        new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0);
        tcg_out32(s, LD | TAI(ret, TCG_REG_TMP2, 0));
        return;
    }

    tmp = arg >> 31 >> 1;
    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
    if (tmp) {
        tcg_out_shli64(s, ret, ret, 32);
    }
    if (arg & 0xffff0000) {
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    }
    if (arg & 0xffff) {
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    }
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg ret, int64_t val)
{
    uint32_t load_insn;
    int rel, low;
    intptr_t add;

    switch (vece) {
    case MO_8:
        low = (int8_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
            return;
        }
        if (have_isa_3_00) {
            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
            return;
        }
        break;

    case MO_16:
        low = (int16_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;

    case MO_32:
        low = (int32_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;
    }

    /*
     * Otherwise we must load the value from the constant pool.
     */
    if (USE_REG_TB) {
        rel = R_PPC_ADDR16;
        add = ppc_tbrel_diff(s, NULL);
    } else if (have_isa_3_10) {
        if (type == TCG_TYPE_V64) {
            tcg_out_8ls_d(s, PLXSD, ret & 31, 0, 0, 1);
            new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0);
        } else {
            tcg_out_8ls_d(s, PLXV, ret & 31, 0, 0, 1);
            new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val);
        }
        return;
    } else if (have_isa_3_00) {
        tcg_out_addpcis(s, TCG_REG_TMP1, 0);
        rel = R_PPC_REL14;
        add = 0;
    } else {
        rel = R_PPC_ADDR32;
        add = 0;
    }

    if (have_vsx) {
        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, val, rel, s->code_ptr, add);
        } else {
            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
        }
    } else {
        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
        } else {
            new_pool_l4(s, rel, s->code_ptr, add,
                        val >> 32, val, val >> 32, val);
        }
    }

    if (USE_REG_TB) {
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
        load_insn |= RA(TCG_REG_TB);
    } else if (have_isa_3_00) {
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    } else {
        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    }
    tcg_out32(s, load_insn);
}
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(ret < TCG_REG_V0);
        tcg_out_movi_int(s, type, ret, arg, false);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           1..10..01..1
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
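
/*
 * e.g. mask_operand(0x00ffff00, &mb, &me) yields mb = 8, me = 23: the
 * RLWINM mask bounds for a contiguous (possibly wrapped) run of ones.
 */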
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
            rs = rt;
            break;
        }
        break;
    case LXSD:
    case STXSD:
        align = 3;
        break;
    case LXV:
    case STXV:
        align = 15;
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_int_store = true;
        break;
    }

    /* For unaligned or large offsets, use the prefixed form. */
    if (have_isa_3_10
        && (offset != (int16_t)offset || (offset & align))
        && offset == sextract64(offset, 0, 34)) {
        /*
         * Note that the MLS:D insns retain their un-prefixed opcode,
         * while the 8LS:D insns use a different opcode space.
         */
        switch (opi) {
        case LBZ:
        case LHZ:
        case LHA:
        case LWZ:
        case STB:
        case STH:
        case STW:
        case ADDI:
            tcg_out_mls_d(s, opi, rt, base, offset, 0);
            return;
        case LWA:
            tcg_out_8ls_d(s, PLWA, rt, base, offset, 0);
            return;
        case LD:
            tcg_out_8ls_d(s, PLD, rt, base, offset, 0);
            return;
        case STD:
            tcg_out_8ls_d(s, PSTD, rt, base, offset, 0);
            return;
        case LXSD:
            tcg_out_8ls_d(s, PLXSD, rt & 31, base, offset, 0);
            return;
        case STXSD:
            tcg_out_8ls_d(s, PSTXSD, rt & 31, base, offset, 0);
            return;
        case LXV:
            tcg_out_8ls_d(s, PLXV, rt & 31, base, offset, 0);
            return;
        case STXV:
            tcg_out_8ls_d(s, PSTXV, rt & 31, base, offset, 0);
            return;
        }
    }

    /* For unaligned, or very large offsets, use the indexed form. */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;
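
    /*
     * e.g. for offset 0x12348000: l0 = -0x8000 and l1 = 0x1235, so
     * "addis rs,base,0x1235" plus a -0x8000 displacement in the final
     * insn rebuilds the full offset.  The 0x4000 step below keeps the
     * high part in range when a positive offset would otherwise see
     * l1 go negative.
     */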
    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
    }
}

static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
{
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
                             ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
        if (offset & 8) {
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        }
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
                         LVX, ret, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        break;
    case TCG_TYPE_I64:
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(arg >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
                             STXSDX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        if (offset & 8) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(arg >= TCG_REG_V0);
        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
                         STVX, arg, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
/*
 * Set dest non-zero if and only if (arg1 & arg2) is non-zero.
 * If RC, then also set RC0.
 */
static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
                         bool const_arg2, TCGType type, bool rc)
{
    int mb, me;

    if (!const_arg2) {
        tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc);
        return;
    }

    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    } else if (arg2 == (uint32_t)arg2) {
        type = TCG_TYPE_I32;
    }

    if ((arg2 & ~0xffff) == 0) {
        tcg_out32(s, ANDI | SAI(arg1, dest, arg2));
        return;
    }
    if ((arg2 & ~0xffff0000ull) == 0) {
        tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16));
        return;
    }
    if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
        if (mask_operand(arg2, &mb, &me)) {
            tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
            return;
        }
    } else {
        int sh = clz64(arg2);
        if (mask64_operand(arg2 << sh, &mb, &me)) {
            tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc);
            return;
        }
    }
    /* Constraints should satisfy this. */
    g_assert_not_reached();
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /*
     * Simplify the comparisons below wrt CMPI.
     * All of the tests are 16-bit, so a 32-bit sign extend always works.
     */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        tcg_debug_assert(cr == 0);
        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, true);
        return;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        g_assert_not_reached();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src, bool neg)
{
    if (neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
        /*
         * X != 0 implies X + -1 generates a carry.
         * RT = (~X + X) + CA
         *    = -1 + CA
         *    = CA ? 0 : -1
         */
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, src, src));
        return;
    }

    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
    if (neg) {
        tcg_out32(s, NEG | RT(dst) | RA(dst));
    }
}
static void tcg_out_setcond_ne0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src, bool neg)
{
    if (!neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
        /*
         * X != 0 implies X + -1 generates a carry.  Extra addition
         * trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.
         */
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
        return;
    }
    tcg_out_setcond_eq0(s, type, dst, src, false);
    if (neg) {
        tcg_out32(s, ADDI | TAI(dst, dst, -1));
    } else {
        tcg_out_xori32(s, dst, dst, 1);
    }
}
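
/*
 * Worked example for the carry tricks above, with X = 3: "addic r0,X,-1"
 * yields 2 and sets CA; "subfe dst,r0,X" then computes ~2 + 3 + CA = 1
 * for the ne0 case, while "subfe dst,X,X" gives ~3 + 3 + CA = 0, i.e.
 * -(X == 0), for the negated eq0 case.
 */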
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2, bool neg)
{
    int sh;
    bool inv;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Ignore high bits of a potential constant arg2. */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* With SETBC/SETBCR, we can always implement with 2 insns. */
    if (have_isa_3_10) {
        tcg_insn_unit bi, opc;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);

        /* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
        bi = tcg_to_bc[cond] & (0x1f << 16);
        if (tcg_to_bc[cond] & BO(8)) {
            opc = neg ? SETNBC : SETBC;
        } else {
            opc = neg ? SETNBCR : SETBCR;
        }
        tcg_out32(s, opc | RT(arg0) | bi);
        return;
    }

    /* Handle common and trivial cases before handling anything else. */
    if (const_arg2 && arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
            return;
        case TCG_COND_NE:
            tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit. */
            if (type == TCG_TYPE_I32) {
                if (neg) {
                    tcg_out_sari32(s, arg0, arg1, 31);
                } else {
                    tcg_out_shri32(s, arg0, arg1, 31);
                }
            } else {
                if (neg) {
                    tcg_out_sari64(s, arg0, arg1, 63);
                } else {
                    tcg_out_shri64(s, arg0, arg1, 63);
                }
            }
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (have_isel) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, neg ? -1 : 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    inv = false;
    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
        break;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
        break;

    case TCG_COND_TSTEQ:
        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
        tcg_out_setcond_eq0(s, type, arg0, TCG_REG_R0, neg);
        break;

    case TCG_COND_TSTNE:
        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
        tcg_out_setcond_ne0(s, type, arg0, TCG_REG_R0, neg);
        break;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        inv = true;
        /* FALLTHRU */
    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30; /* CR7 CR_GT */
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        inv = true;
        /* FALLTHRU */
    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29; /* CR7 CR_LT */
        goto crtest;

    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
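        /*
         * The comparison result is in cr7: mfocrf copies that CR field
         * into the low nibble of r0, and the rlwinm below rotates the
         * selected bit (CR_LT or CR_GT) down to bit 31 of arg0.
         */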
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        if (neg && inv) {
            tcg_out32(s, ADDI | TAI(arg0, arg0, -1));
        } else if (neg) {
            tcg_out32(s, NEG | RT(arg0) | RA(arg0));
        } else if (inv) {
            tcg_out_xori32(s, arg0, arg0, 1);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
{
    tcg_out32(s, tcg_to_bc[cond] | bd);
}

static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l)
{
    int bd = 0;
    if (l->has_value) {
        bd = reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
    }
    tcg_out_bc(s, cond, bd);
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
    tcg_out_bc_lab(s, cond, l);
}
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code. */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 0, type);

    if (have_isel) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion. */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out_bc(s, cond, 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
        tcg_out32(s, opc | RA(a0) | RS(a1));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 0, type);
        /* Note that the only other valid constant for a2 is 0. */
        if (have_isel) {
            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
        } else if (!const_a2 && a0 == a2) {
            tcg_out_bc(s, TCG_COND_EQ, 8);
            tcg_out32(s, opc | RA(a0) | RS(a1));
        } else {
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out_bc(s, TCG_COND_NE, 8);
            if (a2 == 0) {
                tcg_out_movi(s, type, a0, 0);
            } else {
                tcg_out_mov(s, type, a0, a2);
            }
        }
    }
}
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
{
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    };

    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    int op, bit1, bit2;

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];
    blconst = const_args[2];
    bhconst = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
        op = CRAND;
        goto do_equality;
    case TCG_COND_NE:
        op = CRNAND;
    do_equality:
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (blconst) {
            tcg_out_andi32(s, TCG_REG_R0, al, bl);
        } else {
            tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl));
        }
        if (bhconst) {
            tcg_out_andi32(s, TCG_REG_TMP1, ah, bh);
        } else {
            tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh));
        }
        tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1);
        break;

    case TCG_COND_LT:
    case TCG_COND_LE:
    case TCG_COND_GT:
    case TCG_COND_GE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        cond2 = tcg_unsigned_cond(cond);

        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ));
        break;

    default:
        g_assert_not_reached();
    }
}
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
}

static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
}
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    uint32_t insn;

    if (a0 & TCG_MO_ST_LD) {
        insn = HWSYNC;
    } else {
        insn = LWSYNC;
    }
    tcg_out32(s, insn);
}
static void tcg_out_call_int(TCGContext *s, int lk,
                             const tcg_insn_unit *target)
{
#ifdef _CALL_AIX
    /* Look through the descriptor.  If the branch is in range, and we
       don't have to spend too much effort on building the toc.  */
    const void *tgt = ((const void * const *)target)[0];
    uintptr_t toc = ((const uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, lk, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below. */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    intptr_t diff;

    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address.  */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);

    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, lk, target);
    } else {
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#else
    tcg_out_b(s, lk, target);
#endif
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, LK, target);
}
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_UQ] = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,   /* no byte-swapped byte load */
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_UQ] = LDBRX,
};

static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_UQ] = STDX,
    [MO_BSWAP | MO_UB] = STBX,   /* no byte-swapped byte store */
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_UQ] = STDBRX,
};
static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    if (arg < 0) {
        arg = TCG_REG_TMP1;
    }
    tcg_out32(s, MFSPR | RT(arg) | LR);
    return arg;
}

/*
 * For the purposes of ppc32 sorting 4 input registers into 4 argument
 * registers, there is an outside chance we would require 3 temps.
 */
static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 3,
    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
};
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_b(s, 0, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
bool tcg_target_has_memory_bswap(MemOp memop)
{
    TCGAtomAlign aa;

    if ((memop & MO_SIZE) <= MO_64) {
        return true;
    }

    /*
     * Reject 16-byte memop with 16-byte atomicity,
     * but do allow a pair of 64-bit operations.
     */
    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
    return aa.atom <= MO_64;
}

/* We expect to use a 16-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -32768
2436 * For system-mode, perform the TLB load and compare.
2437 * For user-mode, perform any required alignment tests.
2438 * In both cases, return a TCGLabelQemuLdst structure if the slow path
2439 * is required and fill in @h with the host address for the fast path.
2441 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
2442 TCGReg addrlo, TCGReg addrhi,
2443 MemOpIdx oi, bool is_ld)
2445 TCGType addr_type = s->addr_type;
2446 TCGLabelQemuLdst *ldst = NULL;
2447 MemOp opc = get_memop(oi);
2448 MemOp a_bits, s_bits;
2451 * Book II, Section 1.4, Single-Copy Atomicity, specifies:
2453 * Before 3.0, "An access that is not atomic is performed as a set of
2454 * smaller disjoint atomic accesses. In general, the number and alignment
2455 * of these accesses are implementation-dependent." Thus MO_ATOM_IFALIGN.
2457 * As of 3.0, "the non-atomic access is performed as described in
2458 * the corresponding list", which matches MO_ATOM_SUBALIGN.
2460 s_bits = opc & MO_SIZE;
2461 h->aa = atom_and_align_for_opc(s, opc,
2462 have_isa_3_00 ? MO_ATOM_SUBALIGN
2465 a_bits = h->aa.align;
2467 if (tcg_use_softmmu) {
2468 int mem_index = get_mmuidx(oi);
2469 int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
2470 : offsetof(CPUTLBEntry, addr_write);
2471 int fast_off = tlb_mask_table_ofs(s, mem_index);
2472 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2473 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2475 ldst = new_ldst_label(s);
2476 ldst->is_ld = is_ld;
2478 ldst->addrlo_reg = addrlo;
2479 ldst->addrhi_reg = addrhi;
2481 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
2482 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
2483 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
2485 /* Extract the page index, shifted into place for tlb index. */
2486 if (TCG_TARGET_REG_BITS == 32) {
2487 tcg_out_shri32(s, TCG_REG_R0, addrlo,
2488 s->page_bits - CPU_TLB_ENTRY_BITS);
2490 tcg_out_shri64(s, TCG_REG_R0, addrlo,
2491 s->page_bits - CPU_TLB_ENTRY_BITS);
2493 tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
2496 * Load the (low part) TLB comparator into TMP2.
2497 * For 64-bit host, always load the entire 64-bit slot for simplicity.
2498 * We will ignore the high bits with tcg_out_cmp(..., addr_type).
2500 if (TCG_TARGET_REG_BITS == 64) {
2502 tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
2503 TCG_REG_TMP1, TCG_REG_TMP2));
2505 tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
2506 TCG_REG_TMP1, TCG_REG_TMP2));
2507 tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
2508 TCG_REG_TMP1, cmp_off);
2510 } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
2511 tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
2512 TCG_REG_TMP1, TCG_REG_TMP2));
2514 tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2515 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2516 cmp_off + 4 * HOST_BIG_ENDIAN);
2520 * Load the TLB addend for use on the fast path.
2521 * Do this asap to minimize any load use delay.
2523 if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
2524 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2525 offsetof(CPUTLBEntry, addend));
2528 /* Clear the non-page, non-alignment bits from the address in R0. */
2529 if (TCG_TARGET_REG_BITS == 32) {
2530 /*
2531 * We don't support unaligned accesses on 32-bit hosts.
2532 * Preserve the bottom bits and thus trigger a comparison
2533 * failure on unaligned accesses.
2534 */
2535 if (a_bits < s_bits) {
2536 a_bits = s_bits;
2537 }
2538 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2539 (32 - a_bits) & 31, 31 - s->page_bits);
2540 } else {
2541 TCGReg t = addrlo;
2543 /*
2544 * If the access is unaligned, we need to make sure we fail if we
2545 * cross a page boundary. The trick is to add the access size-1
2546 * to the address before masking the low bits. That will make the
2547 * address overflow to the next page if we cross a page boundary,
2548 * which will then force a mismatch of the TLB compare.
2549 */
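/*
 * Worked example, assuming 4KiB pages (page_bits = 12): a 4-byte access
 * (s_bits = 2, a_bits = 0) at address 0xffd has s_mask - a_mask = 3
 * added, giving 0x1000.  Masking the in-page bits then yields the next
 * page number, the TLB compare fails, and the slow path takes over.
 */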
2550 if (a_bits < s_bits) {
2551 unsigned a_mask = (1 << a_bits) - 1;
2552 unsigned s_mask = (1 << s_bits) - 1;
2553 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2554 t = TCG_REG_R0;
2555 }
2557 /* Mask the address for the requested alignment. */
2558 if (addr_type == TCG_TYPE_I32) {
2559 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2560 (32 - a_bits) & 31, 31 - s->page_bits);
2561 } else if (a_bits == 0) {
2562 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
2563 } else {
2564 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2565 64 - s->page_bits, s->page_bits - a_bits);
2566 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
2567 }
2568 }
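/*
 * The two rotates above clear bits [page_bits-1 : a_bits] in one pass:
 * rotate the address right by page_bits so the in-page offset occupies
 * the high bits, clear the top page_bits - a_bits bits, then rotate
 * back.  The low a_bits bits are deliberately preserved, so a
 * misaligned address can never equal the aligned TLB comparator.
 */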
2570 if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
2571 /* Low part comparison into cr7. */
2572 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2573 0, 7, TCG_TYPE_I32);
2575 /* Load the high part TLB comparator into TMP2. */
2576 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2577 cmp_off + 4 * !HOST_BIG_ENDIAN);
2579 /* Load addend, deferred for this case. */
2580 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2581 offsetof(CPUTLBEntry, addend));
2583 /* High part comparison into cr6. */
2584 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
2585 0, 6, TCG_TYPE_I32);
2587 /* Combine comparisons into cr0. */
2588 tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2589 } else {
2590 /* Full comparison into cr0. */
2591 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2592 0, 0, addr_type);
2593 }
2595 /* Conditional branch-and-link to the slow path; LR holds the return address. */
2596 ldst->label_ptr[0] = s->code_ptr;
2597 tcg_out_bc(s, TCG_COND_NE, LK);
2599 h->base = TCG_REG_TMP1;
2602 ldst = new_ldst_label(s);
2603 ldst->is_ld = is_ld;
2604 ldst->oi = oi;
2605 ldst->addrlo_reg = addrlo;
2606 ldst->addrhi_reg = addrhi;
2608 /* We expect a_bits to max out at 7, well below ANDI's 16-bit immediate. */
2609 tcg_debug_assert(a_bits < 16);
2610 tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
2612 ldst->label_ptr[0] = s->code_ptr;
2613 tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
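/*
 * E.g. with a_bits = 2 the record-form andi. above computes
 * addrlo & 3 and sets CR0; a nonzero result (a misaligned address)
 * takes the conditional branch-and-link into the slow path.
 */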
2616 h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2619 if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
2620 /* Zero-extend the guest address for use in the host address. */
2621 tcg_out_ext32u(s, TCG_REG_R0, addrlo);
2622 h->index = TCG_REG_R0;
2630 static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
2631 TCGReg addrlo, TCGReg addrhi,
2632 MemOpIdx oi, TCGType data_type)
2634 MemOp opc = get_memop(oi);
2635 TCGLabelQemuLdst *ldst;
2638 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
2640 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2641 if (opc & MO_BSWAP) {
2642 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2643 tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2644 tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
2645 } else if (h.base != 0) {
2646 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2647 tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
2648 tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
2649 } else if (h.index == datahi) {
2650 tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2651 tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2653 tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2654 tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2657 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2658 if (!have_isa_2_06 && insn == LDBRX) {
2659 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2660 tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2661 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
2662 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2664 tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2666 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2667 tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2668 tcg_out_movext(s, TCG_TYPE_REG, datalo,
2669 TCG_TYPE_REG, opc & MO_SSIZE, datalo);
2674 ldst->type = data_type;
2675 ldst->datalo_reg = datalo;
2676 ldst->datahi_reg = datahi;
2677 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2681 static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
2682 TCGReg addrlo, TCGReg addrhi,
2683 MemOpIdx oi, TCGType data_type)
2685 MemOp opc = get_memop(oi);
2686 TCGLabelQemuLdst *ldst;
2689 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
2691 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2692 if (opc & MO_BSWAP) {
2693 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2694 tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2695 tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
2696 } else if (h.base != 0) {
2697 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2698 tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
2699 tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
2701 tcg_out32(s, STW | TAI(datahi, h.index, 0));
2702 tcg_out32(s, STW | TAI(datalo, h.index, 4));
2705 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2706 if (!have_isa_2_06 && insn == STDBRX) {
2707 tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2708 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
2709 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2710 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
2712 tcg_out32(s, insn | SAB(datalo, h.base, h.index));
2717 ldst->type = data_type;
2718 ldst->datalo_reg = datalo;
2719 ldst->datahi_reg = datahi;
2720 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2724 static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
2725 TCGReg addr_reg, MemOpIdx oi, bool is_ld)
2727 TCGLabelQemuLdst *ldst;
2733 ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
2735 /* Compose the final address, as LQ/STQ have no indexing. */
2736 index = h.index;
2737 if (h.base != 0) {
2738 index = TCG_REG_TMP1;
2739 tcg_out32(s, ADD | TAB(index, h.base, h.index));
2740 }
2741 need_bswap = get_memop(oi) & MO_BSWAP;
2743 if (h.aa.atom == MO_128) {
2744 tcg_debug_assert(!need_bswap);
2745 tcg_debug_assert(datalo & 1);
2746 tcg_debug_assert(datahi == datalo - 1);
2747 tcg_debug_assert(!is_ld || datahi != index);
2748 insn = is_ld ? LQ : STQ;
2749 tcg_out32(s, insn | TAI(datahi, index, 0));
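/*
 * lq/stq operate on an even/odd register pair (RTp must be even);
 * the asserts above encode that pairing, with datahi in the even
 * register and datalo in the odd one (e.g. r8/r9 encodes RT = r8).
 */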
2750 } else {
2751 TCGReg d1, d2;
2753 if (HOST_BIG_ENDIAN ^ need_bswap) {
2754 d1 = datahi, d2 = datalo;
2755 } else {
2756 d1 = datalo, d2 = datahi;
2757 }
2759 if (need_bswap) {
2760 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 8);
2761 insn = is_ld ? LDBRX : STDBRX;
2762 tcg_out32(s, insn | TAB(d1, 0, index));
2763 tcg_out32(s, insn | TAB(d2, index, TCG_REG_R0));
2764 } else {
2765 insn = is_ld ? LD : STD;
2766 tcg_out32(s, insn | TAI(d1, index, 0));
2767 tcg_out32(s, insn | TAI(d2, index, 8));
2768 }
2769 }
2772 ldst->type = TCG_TYPE_I128;
2773 ldst->datalo_reg = datalo;
2774 ldst->datahi_reg = datahi;
2775 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2779 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2780 {
2781 int i;
2782 for (i = 0; i < count; ++i) {
2783 p[i] = NOP;
2784 }
2785 }
2787 /* Parameters for function call generation, used in tcg.c. */
2788 #define TCG_TARGET_STACK_ALIGN 16
2790 #ifdef _CALL_AIX
2791 # define LINK_AREA_SIZE (6 * SZR)
2792 # define LR_OFFSET (1 * SZR)
2793 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2794 #elif defined(_CALL_DARWIN)
2795 # define LINK_AREA_SIZE (6 * SZR)
2796 # define LR_OFFSET (2 * SZR)
2797 #elif TCG_TARGET_REG_BITS == 64
2798 # if defined(_CALL_ELF) && _CALL_ELF == 2
2799 # define LINK_AREA_SIZE (4 * SZR)
2800 # define LR_OFFSET (1 * SZR)
2801 # endif
2802 #else /* TCG_TARGET_REG_BITS == 32 */
2803 # if defined(_CALL_SYSV)
2804 # define LINK_AREA_SIZE (2 * SZR)
2805 # define LR_OFFSET (1 * SZR)
2806 # endif
2807 #endif
2808 #ifndef LR_OFFSET
2809 # error "Unhandled ABI"
2810 #endif
2811 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2812 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2813 #endif
2815 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2816 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2818 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2819 + TCG_STATIC_CALL_ARGS_SIZE \
2820 + CPU_TEMP_BUF_SIZE \
2821 + REG_SAVE_SIZE \
2822 + TCG_TARGET_STACK_ALIGN - 1) \
2823 & -TCG_TARGET_STACK_ALIGN)
2825 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
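/*
 * Rough sizing, purely for illustration: on a 64-bit ELFv2 host with
 * SZR = 8, LINK_AREA_SIZE = 32, TCG_STATIC_CALL_ARGS_SIZE = 128,
 * CPU_TEMP_BUF_SIZE = 1024 and 18 callee-saved registers (144 bytes),
 * FRAME_SIZE is 32 + 128 + 1024 + 144 = 1328, already a multiple of 16.
 */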
2827 static void tcg_target_qemu_prologue(TCGContext *s)
2828 {
2829 int i;
2831 #ifdef _CALL_AIX
2832 const void **desc = (const void **)s->code_ptr;
2833 desc[0] = tcg_splitwx_to_rx(desc + 2); /* entry point */
2834 desc[1] = 0; /* environment pointer */
2835 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2836 #endif
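/*
 * On AIX a function pointer designates a descriptor (code address plus
 * environment word) rather than the code itself, so the prologue
 * fabricates a descriptor whose entry point is the code that follows it.
 */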
2838 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2839 CPU_TEMP_BUF_SIZE);
2841 /* Prologue */
2842 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2843 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2844 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2846 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2847 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2848 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2850 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2852 if (!tcg_use_softmmu && guest_base) {
2853 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2854 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2857 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2858 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2859 tcg_out32(s, BCCTR | BO_ALWAYS);
2862 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2864 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2865 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2866 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2867 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2869 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2870 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2871 tcg_out32(s, BCLR | BO_ALWAYS);
2874 static void tcg_out_tb_start(TCGContext *s)
2875 {
2876 /* Load TCG_REG_TB. */
2877 if (USE_REG_TB) {
2878 if (have_isa_3_00) {
2879 /* lnia REG_TB */
2880 tcg_out_addpcis(s, TCG_REG_TB, 0);
2881 } else {
2882 /* bcl 20,31,$+4 (preferred form for getting nia) */
2883 tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
2884 tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
2885 }
2886 }
2887 }
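/*
 * "bcl 20,31,$+4" is architecturally special-cased: it sets LR to the
 * next instruction address without disturbing the branch predictor's
 * link stack, which is why it is the preferred way to read the current
 * address on cores that lack addpcis (ISA v3.0).
 */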
2889 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2890 {
2891 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2892 tcg_out_b(s, 0, tcg_code_gen_epilogue);
2893 }
2895 static void tcg_out_goto_tb(TCGContext *s, int which)
2896 {
2897 uintptr_t ptr = get_jmp_target_addr(s, which);
2898 int16_t lo;
2900 /* Direct branch will be patched by tb_target_set_jmp_target. */
2901 set_jmp_insn_offset(s, which);
2902 tcg_out32(s, NOP);
2904 /* When the branch is out of range, fall through to the indirect path. */
2905 if (USE_REG_TB) {
2906 ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
2907 tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
2908 } else if (have_isa_3_10) {
2909 ptrdiff_t offset = tcg_pcrel_diff_for_prefix(s, (void *)ptr);
2910 tcg_out_8ls_d(s, PLD, TCG_REG_TMP1, 0, offset, 1);
2911 } else if (have_isa_3_00) {
2912 ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
2913 lo = offset;
2914 tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo);
2915 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
2916 } else {
2917 lo = ptr;
2918 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo);
2919 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
2920 }
2922 tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2923 tcg_out32(s, BCCTR | BO_ALWAYS);
2924 set_jmp_reset_offset(s, which);
2925 }
2927 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2928 uintptr_t jmp_rx, uintptr_t jmp_rw)
2929 {
2930 uintptr_t addr = tb->jmp_target_addr[n];
2931 intptr_t diff = addr - jmp_rx;
2932 tcg_insn_unit insn;
2934 if (in_range_b(diff)) {
2935 insn = B | (diff & 0x3fffffc);
2936 } else {
2937 insn = NOP;
2938 }
2940 qatomic_set((uint32_t *)jmp_rw, insn);
2941 flush_idcache_range(jmp_rx, jmp_rw, 4);
2942 }
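/*
 * An I-form branch carries a 26-bit signed displacement (24-bit LI
 * field scaled by 4), so direct patching works within +/-32MiB of the
 * jump; beyond that the NOP is written back and execution falls
 * through to the indirect sequence emitted by tcg_out_goto_tb above.
 */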
2944 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2945 const TCGArg args[TCG_MAX_OP_ARGS],
2946 const int const_args[TCG_MAX_OP_ARGS])
2951 case INDEX_op_goto_ptr:
2952 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2953 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2954 tcg_out32(s, BCCTR | BO_ALWAYS);
2955 break;
2956 case INDEX_op_br:
2957 {
2958 TCGLabel *l = arg_label(args[0]);
2959 uint32_t insn = B;
2961 if (l->has_value) {
2962 insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2963 l->u.value_ptr);
2964 } else {
2965 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2966 }
2967 tcg_out32(s, insn);
2968 }
2969 break;
2970 case INDEX_op_ld8u_i32:
2971 case INDEX_op_ld8u_i64:
2972 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2974 case INDEX_op_ld8s_i32:
2975 case INDEX_op_ld8s_i64:
2976 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2977 tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2979 case INDEX_op_ld16u_i32:
2980 case INDEX_op_ld16u_i64:
2981 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2983 case INDEX_op_ld16s_i32:
2984 case INDEX_op_ld16s_i64:
2985 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2987 case INDEX_op_ld_i32:
2988 case INDEX_op_ld32u_i64:
2989 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2991 case INDEX_op_ld32s_i64:
2992 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2994 case INDEX_op_ld_i64:
2995 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2997 case INDEX_op_st8_i32:
2998 case INDEX_op_st8_i64:
2999 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
3001 case INDEX_op_st16_i32:
3002 case INDEX_op_st16_i64:
3003 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
3005 case INDEX_op_st_i32:
3006 case INDEX_op_st32_i64:
3007 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
3009 case INDEX_op_st_i64:
3010 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
3013 case INDEX_op_add_i32:
3014 a0 = args[0], a1 = args[1], a2 = args[2];
3015 if (const_args[2]) {
3017 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
3019 tcg_out32(s, ADD | TAB(a0, a1, a2));
3022 case INDEX_op_sub_i32:
3023 a0 = args[0], a1 = args[1], a2 = args[2];
3024 if (const_args[1]) {
3025 if (const_args[2]) {
3026 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
3028 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
3030 } else if (const_args[2]) {
3034 tcg_out32(s, SUBF | TAB(a0, a2, a1));
3038 case INDEX_op_and_i32:
3039 a0 = args[0], a1 = args[1], a2 = args[2];
3040 if (const_args[2]) {
3041 tcg_out_andi32(s, a0, a1, a2);
3043 tcg_out32(s, AND | SAB(a1, a0, a2));
3046 case INDEX_op_and_i64:
3047 a0 = args[0], a1 = args[1], a2 = args[2];
3048 if (const_args[2]) {
3049 tcg_out_andi64(s, a0, a1, a2);
3051 tcg_out32(s, AND | SAB(a1, a0, a2));
3054 case INDEX_op_or_i64:
3055 case INDEX_op_or_i32:
3056 a0 = args[0], a1 = args[1], a2 = args[2];
3057 if (const_args[2]) {
3058 tcg_out_ori32(s, a0, a1, a2);
3060 tcg_out32(s, OR | SAB(a1, a0, a2));
3063 case INDEX_op_xor_i64:
3064 case INDEX_op_xor_i32:
3065 a0 = args[0], a1 = args[1], a2 = args[2];
3066 if (const_args[2]) {
3067 tcg_out_xori32(s, a0, a1, a2);
3069 tcg_out32(s, XOR | SAB(a1, a0, a2));
3072 case INDEX_op_andc_i32:
3073 a0 = args[0], a1 = args[1], a2 = args[2];
3074 if (const_args[2]) {
3075 tcg_out_andi32(s, a0, a1, ~a2);
3077 tcg_out32(s, ANDC | SAB(a1, a0, a2));
3080 case INDEX_op_andc_i64:
3081 a0 = args[0], a1 = args[1], a2 = args[2];
3082 if (const_args[2]) {
3083 tcg_out_andi64(s, a0, a1, ~a2);
3085 tcg_out32(s, ANDC | SAB(a1, a0, a2));
3088 case INDEX_op_orc_i32:
3089 if (const_args[2]) {
3090 tcg_out_ori32(s, args[0], args[1], ~args[2]);
3094 case INDEX_op_orc_i64:
3095 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
3097 case INDEX_op_eqv_i32:
3098 if (const_args[2]) {
3099 tcg_out_xori32(s, args[0], args[1], ~args[2]);
3103 case INDEX_op_eqv_i64:
3104 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
3106 case INDEX_op_nand_i32:
3107 case INDEX_op_nand_i64:
3108 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
3110 case INDEX_op_nor_i32:
3111 case INDEX_op_nor_i64:
3112 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
3115 case INDEX_op_clz_i32:
3116 tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
3117 args[2], const_args[2]);
3119 case INDEX_op_ctz_i32:
3120 tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
3121 args[2], const_args[2]);
3123 case INDEX_op_ctpop_i32:
3124 tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
3127 case INDEX_op_clz_i64:
3128 tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
3129 args[2], const_args[2]);
3131 case INDEX_op_ctz_i64:
3132 tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
3133 args[2], const_args[2]);
3135 case INDEX_op_ctpop_i64:
3136 tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
3139 case INDEX_op_mul_i32:
3140 a0 = args[0], a1 = args[1], a2 = args[2];
3141 if (const_args[2]) {
3142 tcg_out32(s, MULLI | TAI(a0, a1, a2));
3144 tcg_out32(s, MULLW | TAB(a0, a1, a2));
3148 case INDEX_op_div_i32:
3149 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
3152 case INDEX_op_divu_i32:
3153 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
3156 case INDEX_op_rem_i32:
3157 tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
3160 case INDEX_op_remu_i32:
3161 tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
3164 case INDEX_op_shl_i32:
3165 if (const_args[2]) {
3166 /* Limit immediate shift count lest we create an illegal insn. */
3167 tcg_out_shli32(s, args[0], args[1], args[2] & 31);
3169 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
3172 case INDEX_op_shr_i32:
3173 if (const_args[2]) {
3174 /* Limit immediate shift count lest we create an illegal insn. */
3175 tcg_out_shri32(s, args[0], args[1], args[2] & 31);
3177 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
3180 case INDEX_op_sar_i32:
3181 if (const_args[2]) {
3182 tcg_out_sari32(s, args[0], args[1], args[2]);
3184 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
3187 case INDEX_op_rotl_i32:
3188 if (const_args[2]) {
3189 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
3191 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
3192 | MB(0) | ME(31));
3193 }
3194 break;
3195 case INDEX_op_rotr_i32:
3196 if (const_args[2]) {
3197 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
3199 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
3200 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
3201 | MB(0) | ME(31));
3202 }
3203 break;
3205 case INDEX_op_brcond_i32:
3206 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
3207 arg_label(args[3]), TCG_TYPE_I32);
3209 case INDEX_op_brcond_i64:
3210 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
3211 arg_label(args[3]), TCG_TYPE_I64);
3213 case INDEX_op_brcond2_i32:
3214 tcg_out_brcond2(s, args, const_args);
3217 case INDEX_op_neg_i32:
3218 case INDEX_op_neg_i64:
3219 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
3222 case INDEX_op_not_i32:
3223 case INDEX_op_not_i64:
3224 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
3227 case INDEX_op_add_i64:
3228 a0 = args[0], a1 = args[1], a2 = args[2];
3229 if (const_args[2]) {
3231 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
3233 tcg_out32(s, ADD | TAB(a0, a1, a2));
3236 case INDEX_op_sub_i64:
3237 a0 = args[0], a1 = args[1], a2 = args[2];
3238 if (const_args[1]) {
3239 if (const_args[2]) {
3240 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
3242 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
3244 } else if (const_args[2]) {
3248 tcg_out32(s, SUBF | TAB(a0, a2, a1));
3252 case INDEX_op_shl_i64:
3253 if (const_args[2]) {
3254 /* Limit immediate shift count lest we create an illegal insn. */
3255 tcg_out_shli64(s, args[0], args[1], args[2] & 63);
3257 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
3260 case INDEX_op_shr_i64:
3261 if (const_args[2]) {
3262 /* Limit immediate shift count lest we create an illegal insn. */
3263 tcg_out_shri64(s, args[0], args[1], args[2] & 63);
3265 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
3268 case INDEX_op_sar_i64:
3269 if (const_args[2]) {
3270 tcg_out_sari64(s, args[0], args[1], args[2]);
3272 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
3275 case INDEX_op_rotl_i64:
3276 if (const_args[2]) {
3277 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
3279 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
3282 case INDEX_op_rotr_i64:
3283 if (const_args[2]) {
3284 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
3286 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
3287 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
3291 case INDEX_op_mul_i64:
3292 a0 = args[0], a1 = args[1], a2 = args[2];
3293 if (const_args[2]) {
3294 tcg_out32(s, MULLI | TAI(a0, a1, a2));
3296 tcg_out32(s, MULLD | TAB(a0, a1, a2));
3299 case INDEX_op_div_i64:
3300 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
3302 case INDEX_op_divu_i64:
3303 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
3305 case INDEX_op_rem_i64:
3306 tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
3308 case INDEX_op_remu_i64:
3309 tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
3312 case INDEX_op_qemu_ld_a64_i32:
3313 if (TCG_TARGET_REG_BITS == 32) {
3314 tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
3315 args[3], TCG_TYPE_I32);
3319 case INDEX_op_qemu_ld_a32_i32:
3320 tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
3322 case INDEX_op_qemu_ld_a32_i64:
3323 if (TCG_TARGET_REG_BITS == 64) {
3324 tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
3325 args[2], TCG_TYPE_I64);
3327 tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
3328 args[3], TCG_TYPE_I64);
3331 case INDEX_op_qemu_ld_a64_i64:
3332 if (TCG_TARGET_REG_BITS == 64) {
3333 tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
3334 args[2], TCG_TYPE_I64);
3336 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
3337 args[4], TCG_TYPE_I64);
3340 case INDEX_op_qemu_ld_a32_i128:
3341 case INDEX_op_qemu_ld_a64_i128:
3342 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
3343 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
3346 case INDEX_op_qemu_st_a64_i32:
3347 if (TCG_TARGET_REG_BITS == 32) {
3348 tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
3349 args[3], TCG_TYPE_I32);
3353 case INDEX_op_qemu_st_a32_i32:
3354 tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
3356 case INDEX_op_qemu_st_a32_i64:
3357 if (TCG_TARGET_REG_BITS == 64) {
3358 tcg_out_qemu_st(s, args[0], -1, args[1], -1,
3359 args[2], TCG_TYPE_I64);
3361 tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
3362 args[3], TCG_TYPE_I64);
3365 case INDEX_op_qemu_st_a64_i64:
3366 if (TCG_TARGET_REG_BITS == 64) {
3367 tcg_out_qemu_st(s, args[0], -1, args[1], -1,
3368 args[2], TCG_TYPE_I64);
3370 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
3371 args[4], TCG_TYPE_I64);
3374 case INDEX_op_qemu_st_a32_i128:
3375 case INDEX_op_qemu_st_a64_i128:
3376 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
3377 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
3380 case INDEX_op_setcond_i32:
3381 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
3382 const_args[2], false);
3384 case INDEX_op_setcond_i64:
3385 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
3386 const_args[2], false);
3388 case INDEX_op_negsetcond_i32:
3389 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
3390 const_args[2], true);
3392 case INDEX_op_negsetcond_i64:
3393 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
3394 const_args[2], true);
3396 case INDEX_op_setcond2_i32:
3397 tcg_out_setcond2(s, args, const_args);
3400 case INDEX_op_bswap16_i32:
3401 case INDEX_op_bswap16_i64:
3402 tcg_out_bswap16(s, args[0], args[1], args[2]);
3404 case INDEX_op_bswap32_i32:
3405 tcg_out_bswap32(s, args[0], args[1], 0);
3407 case INDEX_op_bswap32_i64:
3408 tcg_out_bswap32(s, args[0], args[1], args[2]);
3410 case INDEX_op_bswap64_i64:
3411 tcg_out_bswap64(s, args[0], args[1]);
3414 case INDEX_op_deposit_i32:
3415 if (const_args[2]) {
3416 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
3417 tcg_out_andi32(s, args[0], args[0], ~mask);
3419 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
3420 32 - args[3] - args[4], 31 - args[3]);
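/*
 * E.g. depositing a 16-bit field at bit 8 (args[3] = 8, args[4] = 16):
 * the constant-zero source path clears mask 0x00ffff00 instead, while
 * rlwimi above rotates args[2] left by 8 and inserts value bits 23..8.
 */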
3423 case INDEX_op_deposit_i64:
3424 if (const_args[2]) {
3425 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
3426 tcg_out_andi64(s, args[0], args[0], ~mask);
3428 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
3429 64 - args[3] - args[4]);
3433 case INDEX_op_extract_i32:
3434 tcg_out_rlw(s, RLWINM, args[0], args[1],
3435 32 - args[2], 32 - args[3], 31);
3437 case INDEX_op_extract_i64:
3438 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
3441 case INDEX_op_movcond_i32:
3442 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
3443 args[3], args[4], const_args[2]);
3445 case INDEX_op_movcond_i64:
3446 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
3447 args[3], args[4], const_args[2]);
3450 #if TCG_TARGET_REG_BITS == 64
3451 case INDEX_op_add2_i64:
3453 case INDEX_op_add2_i32:
3455 /* Note that the CA bit is defined based on the word size of the
3456 environment. So in 64-bit mode it's always carry-out of bit 63.
3457 The fallback code using deposit works just as well for 32-bit. */
3458 a0 = args[0], a1 = args[1];
3459 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
3460 a0 = TCG_REG_R0;
3461 }
3462 if (const_args[4]) {
3463 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
3465 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
3467 if (const_args[5]) {
3468 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
3470 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
3472 if (a0 != args[0]) {
3473 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3477 #if TCG_TARGET_REG_BITS == 64
3478 case INDEX_op_sub2_i64:
3480 case INDEX_op_sub2_i32:
3482 a0 = args[0], a1 = args[1];
3483 if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
3484 a0 = TCG_REG_R0;
3485 }
3486 if (const_args[2]) {
3487 tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
3489 tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
3491 if (const_args[3]) {
3492 tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
3494 tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
3496 if (a0 != args[0]) {
3497 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3501 case INDEX_op_muluh_i32:
3502 tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
3504 case INDEX_op_mulsh_i32:
3505 tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
3507 case INDEX_op_muluh_i64:
3508 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
3510 case INDEX_op_mulsh_i64:
3511 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
3512 break;
3514 case INDEX_op_mb:
3515 tcg_out_mb(s, args[0]);
3516 break;
3518 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
3519 case INDEX_op_mov_i64:
3520 case INDEX_op_call: /* Always emitted via tcg_out_call. */
3521 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
3522 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
3523 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
3524 case INDEX_op_ext8s_i64:
3525 case INDEX_op_ext8u_i32:
3526 case INDEX_op_ext8u_i64:
3527 case INDEX_op_ext16s_i32:
3528 case INDEX_op_ext16s_i64:
3529 case INDEX_op_ext16u_i32:
3530 case INDEX_op_ext16u_i64:
3531 case INDEX_op_ext32s_i64:
3532 case INDEX_op_ext32u_i64:
3533 case INDEX_op_ext_i32_i64:
3534 case INDEX_op_extu_i32_i64:
3535 case INDEX_op_extrl_i64_i32:
3536 default:
3537 g_assert_not_reached();
3538 }
3539 }
3541 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3542 {
3543 switch (opc) {
3544 case INDEX_op_and_vec:
3545 case INDEX_op_or_vec:
3546 case INDEX_op_xor_vec:
3547 case INDEX_op_andc_vec:
3548 case INDEX_op_not_vec:
3549 case INDEX_op_nor_vec:
3550 case INDEX_op_eqv_vec:
3551 case INDEX_op_nand_vec:
3552 return 1;
3553 case INDEX_op_orc_vec:
3554 return have_isa_2_07;
3555 case INDEX_op_add_vec:
3556 case INDEX_op_sub_vec:
3557 case INDEX_op_smax_vec:
3558 case INDEX_op_smin_vec:
3559 case INDEX_op_umax_vec:
3560 case INDEX_op_umin_vec:
3561 case INDEX_op_shlv_vec:
3562 case INDEX_op_shrv_vec:
3563 case INDEX_op_sarv_vec:
3564 case INDEX_op_rotlv_vec:
3565 return vece <= MO_32 || have_isa_2_07;
3566 case INDEX_op_ssadd_vec:
3567 case INDEX_op_sssub_vec:
3568 case INDEX_op_usadd_vec:
3569 case INDEX_op_ussub_vec:
3570 return vece <= MO_32;
3571 case INDEX_op_cmp_vec:
3572 case INDEX_op_shli_vec:
3573 case INDEX_op_shri_vec:
3574 case INDEX_op_sari_vec:
3575 case INDEX_op_rotli_vec:
3576 return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3577 case INDEX_op_neg_vec:
3578 return vece >= MO_32 && have_isa_3_00;
3579 case INDEX_op_mul_vec:
3580 switch (vece) {
3581 case MO_8:
3582 case MO_16:
3583 return -1;
3584 case MO_32:
3585 return have_isa_2_07 ? 1 : -1;
3586 case MO_64:
3587 return have_isa_3_10;
3588 }
3589 return 0;
3590 case INDEX_op_bitsel_vec:
3591 return have_vsx;
3592 case INDEX_op_rotrv_vec:
3593 return -1;
3594 default:
3595 return 0;
3596 }
3597 }
3599 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3600 TCGReg dst, TCGReg src)
3602 tcg_debug_assert(dst >= TCG_REG_V0);
3604 /* Splat from integer reg allowed via constraints for v3.00. */
3605 if (src < TCG_REG_V0) {
3606 tcg_debug_assert(have_isa_3_00);
3609 tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3612 tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3615 /* Fail, so that we fall back on either dupm or mov+dup. */
3621 * Recall we use (or emulate) VSX integer loads, so the integer is
3622 * right justified within the left (zero-index) double-word.
3626 tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3629 tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3632 tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3636 tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3639 tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3640 tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3643 g_assert_not_reached();
3648 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3649 TCGReg out, TCGReg base, intptr_t offset)
3653 tcg_debug_assert(out >= TCG_REG_V0);
3656 if (have_isa_3_00) {
3657 tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3659 tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3661 elt = extract32(offset, 0, 4);
3662 #if !HOST_BIG_ENDIAN
3665 tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3668 tcg_debug_assert((offset & 1) == 0);
3669 if (have_isa_3_00) {
3670 tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3672 tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3674 elt = extract32(offset, 1, 3);
3675 #if !HOST_BIG_ENDIAN
3678 tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3681 if (have_isa_3_00) {
3682 tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3685 tcg_debug_assert((offset & 3) == 0);
3686 tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3687 elt = extract32(offset, 2, 2);
3688 #if !HOST_BIG_ENDIAN
3691 tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3695 tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3698 tcg_debug_assert((offset & 7) == 0);
3699 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3700 tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3701 elt = extract32(offset, 3, 1);
3702 #if !HOST_BIG_ENDIAN
3706 tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3708 tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3712 g_assert_not_reached();
3717 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3718 unsigned vecl, unsigned vece,
3719 const TCGArg args[TCG_MAX_OP_ARGS],
3720 const int const_args[TCG_MAX_OP_ARGS])
3722 static const uint32_t
3723 add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3724 sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3725 mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3726 neg_op[4] = { 0, 0, VNEGW, VNEGD },
3727 eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3728 ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3729 gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3730 gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3731 ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3732 usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3733 sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3734 ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3735 umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3736 smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3737 umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3738 smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3739 shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3740 shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3741 sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3742 mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3743 mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3744 muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3745 mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3746 pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3747 rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3749 TCGType type = vecl + TCG_TYPE_V64;
3750 TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3754 case INDEX_op_ld_vec:
3755 tcg_out_ld(s, type, a0, a1, a2);
3757 case INDEX_op_st_vec:
3758 tcg_out_st(s, type, a0, a1, a2);
3760 case INDEX_op_dupm_vec:
3761 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3764 case INDEX_op_add_vec:
3765 insn = add_op[vece];
3767 case INDEX_op_sub_vec:
3768 insn = sub_op[vece];
3770 case INDEX_op_neg_vec:
3771 insn = neg_op[vece];
3772 a2 = a1;
3773 a1 = 0;
3774 break;
3775 case INDEX_op_mul_vec:
3776 insn = mul_op[vece];
3778 case INDEX_op_ssadd_vec:
3779 insn = ssadd_op[vece];
3781 case INDEX_op_sssub_vec:
3782 insn = sssub_op[vece];
3784 case INDEX_op_usadd_vec:
3785 insn = usadd_op[vece];
3787 case INDEX_op_ussub_vec:
3788 insn = ussub_op[vece];
3790 case INDEX_op_smin_vec:
3791 insn = smin_op[vece];
3793 case INDEX_op_umin_vec:
3794 insn = umin_op[vece];
3796 case INDEX_op_smax_vec:
3797 insn = smax_op[vece];
3799 case INDEX_op_umax_vec:
3800 insn = umax_op[vece];
3802 case INDEX_op_shlv_vec:
3803 insn = shlv_op[vece];
3805 case INDEX_op_shrv_vec:
3806 insn = shrv_op[vece];
3808 case INDEX_op_sarv_vec:
3809 insn = sarv_op[vece];
3811 case INDEX_op_and_vec:
3814 case INDEX_op_or_vec:
3817 case INDEX_op_xor_vec:
3820 case INDEX_op_andc_vec:
3823 case INDEX_op_not_vec:
3827 case INDEX_op_orc_vec:
3830 case INDEX_op_nand_vec:
3833 case INDEX_op_nor_vec:
3836 case INDEX_op_eqv_vec:
3840 case INDEX_op_cmp_vec:
3849 insn = gts_op[vece];
3852 insn = gtu_op[vece];
3855 g_assert_not_reached();
3859 case INDEX_op_bitsel_vec:
3860 tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3863 case INDEX_op_dup2_vec:
3864 assert(TCG_TARGET_REG_BITS == 32);
3865 /* With inputs a1 = xLxx, a2 = xHxx */
3866 tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */
3867 tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */
3868 tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */
3871 case INDEX_op_ppc_mrgh_vec:
3872 insn = mrgh_op[vece];
3874 case INDEX_op_ppc_mrgl_vec:
3875 insn = mrgl_op[vece];
3877 case INDEX_op_ppc_muleu_vec:
3878 insn = muleu_op[vece];
3880 case INDEX_op_ppc_mulou_vec:
3881 insn = mulou_op[vece];
3883 case INDEX_op_ppc_pkum_vec:
3884 insn = pkum_op[vece];
3886 case INDEX_op_rotlv_vec:
3887 insn = rotl_op[vece];
3889 case INDEX_op_ppc_msum_vec:
3890 tcg_debug_assert(vece == MO_16);
3891 tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3894 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
3895 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
3897 g_assert_not_reached();
3900 tcg_debug_assert(insn != 0);
3901 tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3904 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3905 TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3906 {
3907 TCGv_vec t1;
3909 if (vece == MO_32) {
3910 /*
3911 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3912 * So using negative numbers gets us the 4th bit easily.
3913 */
3914 imm = sextract32(imm, 0, 5);
3915 } else {
3916 imm &= (8 << vece) - 1;
3917 }
3919 /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
3920 t1 = tcg_constant_vec(type, MO_8, imm);
3921 vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3922 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3923 }
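/*
 * E.g. a 32-bit element shift by 20 becomes sextract32(20, 0, 5) = -12,
 * which VSPLTISB can encode; the shift instructions use only the low
 * 5 bits of each element, and -12 & 31 == 20, so the result is identical.
 */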
3925 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3926 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3928 bool need_swap = false, need_inv = false;
3930 tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3938 if (have_isa_3_00 && vece <= MO_32) {
3952 need_swap = need_inv = true;
3955 g_assert_not_reached();
3959 cond = tcg_invert_cond(cond);
3963 t1 = v1, v1 = v2, v2 = t1;
3964 cond = tcg_swap_cond(cond);
3967 vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3968 tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3971 tcg_gen_not_vec(vece, v0, v0);
3975 static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3976 TCGv_vec v1, TCGv_vec v2)
3978 TCGv_vec t1 = tcg_temp_new_vec(type);
3979 TCGv_vec t2 = tcg_temp_new_vec(type);
3980 TCGv_vec c0, c16;
3982 switch (vece) {
3983 case MO_8:
3984 case MO_16:
3985 vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3986 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3987 vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3988 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3989 vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3990 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3991 vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3992 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3993 vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3994 tcgv_vec_arg(v0), tcgv_vec_arg(t1));
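/*
 * Sketch of the MO_8/MO_16 path above: the even/odd multiplies form
 * double-width products, the merge ops interleave them back into
 * element order, and the pack-modulo discards each product's high
 * half, leaving the modular (low-half) multiply in every lane.
 */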
3995 break;
3997 case MO_32:
3998 tcg_debug_assert(!have_isa_2_07);
3999 /*
4000 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
4001 * So using -16 is a quick way to represent 16.
4002 */
4003 c16 = tcg_constant_vec(type, MO_8, -16);
4004 c0 = tcg_constant_vec(type, MO_8, 0);
4006 vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
4007 tcgv_vec_arg(v2), tcgv_vec_arg(c16));
4008 vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
4009 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
4010 vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
4011 tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
4012 vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
4013 tcgv_vec_arg(t1), tcgv_vec_arg(c16));
4014 tcg_gen_add_vec(MO_32, v0, t1, t2);
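/*
 * The MO_32 path computes x * y mod 2^32 as
 *   lo16(x) * lo16(y) + ((hi16(x) * lo16(y) + lo16(x) * hi16(y)) << 16),
 * where vmulouh produces the odd (low-order) halfword products and
 * vmsumuhm, applied to x and the halfword-swapped y, sums the two
 * cross products before the shift.
 */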
4015 break;
4017 default:
4018 g_assert_not_reached();
4019 }
4020 tcg_temp_free_vec(t1);
4021 tcg_temp_free_vec(t2);
4024 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
4028 TCGv_vec v0, v1, v2, t0;
4032 v0 = temp_tcgv_vec(arg_temp(a0));
4033 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
4034 a2 = va_arg(va, TCGArg);
4037 case INDEX_op_shli_vec:
4038 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
4040 case INDEX_op_shri_vec:
4041 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
4043 case INDEX_op_sari_vec:
4044 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
4046 case INDEX_op_rotli_vec:
4047 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
4049 case INDEX_op_cmp_vec:
4050 v2 = temp_tcgv_vec(arg_temp(a2));
4051 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
4053 case INDEX_op_mul_vec:
4054 v2 = temp_tcgv_vec(arg_temp(a2));
4055 expand_vec_mul(type, vece, v0, v1, v2);
4057 case INDEX_op_rotlv_vec:
4058 v2 = temp_tcgv_vec(arg_temp(a2));
4059 t0 = tcg_temp_new_vec(type);
4060 tcg_gen_neg_vec(vece, t0, v2);
4061 tcg_gen_rotlv_vec(vece, v0, v1, t0);
4062 tcg_temp_free_vec(t0);
4065 g_assert_not_reached();
4070 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
4073 case INDEX_op_goto_ptr:
4076 case INDEX_op_ld8u_i32:
4077 case INDEX_op_ld8s_i32:
4078 case INDEX_op_ld16u_i32:
4079 case INDEX_op_ld16s_i32:
4080 case INDEX_op_ld_i32:
4081 case INDEX_op_ctpop_i32:
4082 case INDEX_op_neg_i32:
4083 case INDEX_op_not_i32:
4084 case INDEX_op_ext8s_i32:
4085 case INDEX_op_ext16s_i32:
4086 case INDEX_op_bswap16_i32:
4087 case INDEX_op_bswap32_i32:
4088 case INDEX_op_extract_i32:
4089 case INDEX_op_ld8u_i64:
4090 case INDEX_op_ld8s_i64:
4091 case INDEX_op_ld16u_i64:
4092 case INDEX_op_ld16s_i64:
4093 case INDEX_op_ld32u_i64:
4094 case INDEX_op_ld32s_i64:
4095 case INDEX_op_ld_i64:
4096 case INDEX_op_ctpop_i64:
4097 case INDEX_op_neg_i64:
4098 case INDEX_op_not_i64:
4099 case INDEX_op_ext8s_i64:
4100 case INDEX_op_ext16s_i64:
4101 case INDEX_op_ext32s_i64:
4102 case INDEX_op_ext_i32_i64:
4103 case INDEX_op_extu_i32_i64:
4104 case INDEX_op_bswap16_i64:
4105 case INDEX_op_bswap32_i64:
4106 case INDEX_op_bswap64_i64:
4107 case INDEX_op_extract_i64:
4108 return C_O1_I1(r, r);
4110 case INDEX_op_st8_i32:
4111 case INDEX_op_st16_i32:
4112 case INDEX_op_st_i32:
4113 case INDEX_op_st8_i64:
4114 case INDEX_op_st16_i64:
4115 case INDEX_op_st32_i64:
4116 case INDEX_op_st_i64:
4117 return C_O0_I2(r, r);
4119 case INDEX_op_add_i32:
4120 case INDEX_op_and_i32:
4121 case INDEX_op_or_i32:
4122 case INDEX_op_xor_i32:
4123 case INDEX_op_andc_i32:
4124 case INDEX_op_orc_i32:
4125 case INDEX_op_eqv_i32:
4126 case INDEX_op_shl_i32:
4127 case INDEX_op_shr_i32:
4128 case INDEX_op_sar_i32:
4129 case INDEX_op_rotl_i32:
4130 case INDEX_op_rotr_i32:
4131 case INDEX_op_and_i64:
4132 case INDEX_op_andc_i64:
4133 case INDEX_op_shl_i64:
4134 case INDEX_op_shr_i64:
4135 case INDEX_op_sar_i64:
4136 case INDEX_op_rotl_i64:
4137 case INDEX_op_rotr_i64:
4138 return C_O1_I2(r, r, ri);
4140 case INDEX_op_mul_i32:
4141 case INDEX_op_mul_i64:
4142 return C_O1_I2(r, r, rI);
4144 case INDEX_op_div_i32:
4145 case INDEX_op_divu_i32:
4146 case INDEX_op_rem_i32:
4147 case INDEX_op_remu_i32:
4148 case INDEX_op_nand_i32:
4149 case INDEX_op_nor_i32:
4150 case INDEX_op_muluh_i32:
4151 case INDEX_op_mulsh_i32:
4152 case INDEX_op_orc_i64:
4153 case INDEX_op_eqv_i64:
4154 case INDEX_op_nand_i64:
4155 case INDEX_op_nor_i64:
4156 case INDEX_op_div_i64:
4157 case INDEX_op_divu_i64:
4158 case INDEX_op_rem_i64:
4159 case INDEX_op_remu_i64:
4160 case INDEX_op_mulsh_i64:
4161 case INDEX_op_muluh_i64:
4162 return C_O1_I2(r, r, r);
4164 case INDEX_op_sub_i32:
4165 return C_O1_I2(r, rI, ri);
4166 case INDEX_op_add_i64:
4167 return C_O1_I2(r, r, rT);
4168 case INDEX_op_or_i64:
4169 case INDEX_op_xor_i64:
4170 return C_O1_I2(r, r, rU);
4171 case INDEX_op_sub_i64:
4172 return C_O1_I2(r, rI, rT);
4173 case INDEX_op_clz_i32:
4174 case INDEX_op_ctz_i32:
4175 case INDEX_op_clz_i64:
4176 case INDEX_op_ctz_i64:
4177 return C_O1_I2(r, r, rZW);
4179 case INDEX_op_brcond_i32:
4180 case INDEX_op_brcond_i64:
4181 return C_O0_I2(r, rC);
4182 case INDEX_op_setcond_i32:
4183 case INDEX_op_setcond_i64:
4184 case INDEX_op_negsetcond_i32:
4185 case INDEX_op_negsetcond_i64:
4186 return C_O1_I2(r, r, rC);
4187 case INDEX_op_movcond_i32:
4188 case INDEX_op_movcond_i64:
4189 return C_O1_I4(r, r, rC, rZ, rZ);
4191 case INDEX_op_deposit_i32:
4192 case INDEX_op_deposit_i64:
4193 return C_O1_I2(r, 0, rZ);
4194 case INDEX_op_brcond2_i32:
4195 return C_O0_I4(r, r, ri, ri);
4196 case INDEX_op_setcond2_i32:
4197 return C_O1_I4(r, r, r, ri, ri);
4198 case INDEX_op_add2_i64:
4199 case INDEX_op_add2_i32:
4200 return C_O2_I4(r, r, r, r, rI, rZM);
4201 case INDEX_op_sub2_i64:
4202 case INDEX_op_sub2_i32:
4203 return C_O2_I4(r, r, rI, rZM, r, r);
4205 case INDEX_op_qemu_ld_a32_i32:
4206 return C_O1_I1(r, r);
4207 case INDEX_op_qemu_ld_a64_i32:
4208 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
4209 case INDEX_op_qemu_ld_a32_i64:
4210 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
4211 case INDEX_op_qemu_ld_a64_i64:
4212 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
4214 case INDEX_op_qemu_st_a32_i32:
4215 return C_O0_I2(r, r);
4216 case INDEX_op_qemu_st_a64_i32:
4217 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
4218 case INDEX_op_qemu_st_a32_i64:
4219 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
4220 case INDEX_op_qemu_st_a64_i64:
4221 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
4223 case INDEX_op_qemu_ld_a32_i128:
4224 case INDEX_op_qemu_ld_a64_i128:
4225 return C_N1O1_I1(o, m, r);
4226 case INDEX_op_qemu_st_a32_i128:
4227 case INDEX_op_qemu_st_a64_i128:
4228 return C_O0_I3(o, m, r);
4230 case INDEX_op_add_vec:
4231 case INDEX_op_sub_vec:
4232 case INDEX_op_mul_vec:
4233 case INDEX_op_and_vec:
4234 case INDEX_op_or_vec:
4235 case INDEX_op_xor_vec:
4236 case INDEX_op_andc_vec:
4237 case INDEX_op_orc_vec:
4238 case INDEX_op_nor_vec:
4239 case INDEX_op_eqv_vec:
4240 case INDEX_op_nand_vec:
4241 case INDEX_op_cmp_vec:
4242 case INDEX_op_ssadd_vec:
4243 case INDEX_op_sssub_vec:
4244 case INDEX_op_usadd_vec:
4245 case INDEX_op_ussub_vec:
4246 case INDEX_op_smax_vec:
4247 case INDEX_op_smin_vec:
4248 case INDEX_op_umax_vec:
4249 case INDEX_op_umin_vec:
4250 case INDEX_op_shlv_vec:
4251 case INDEX_op_shrv_vec:
4252 case INDEX_op_sarv_vec:
4253 case INDEX_op_rotlv_vec:
4254 case INDEX_op_rotrv_vec:
4255 case INDEX_op_ppc_mrgh_vec:
4256 case INDEX_op_ppc_mrgl_vec:
4257 case INDEX_op_ppc_muleu_vec:
4258 case INDEX_op_ppc_mulou_vec:
4259 case INDEX_op_ppc_pkum_vec:
4260 case INDEX_op_dup2_vec:
4261 return C_O1_I2(v, v, v);
4263 case INDEX_op_not_vec:
4264 case INDEX_op_neg_vec:
4265 return C_O1_I1(v, v);
4267 case INDEX_op_dup_vec:
4268 return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
4270 case INDEX_op_ld_vec:
4271 case INDEX_op_dupm_vec:
4272 return C_O1_I1(v, r);
4274 case INDEX_op_st_vec:
4275 return C_O0_I2(v, r);
4277 case INDEX_op_bitsel_vec:
4278 case INDEX_op_ppc_msum_vec:
4279 return C_O1_I3(v, v, v, v);
4282 g_assert_not_reached();
4286 static void tcg_target_init(TCGContext *s)
4288 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
4289 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
4291 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
4292 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
4295 tcg_target_call_clobber_regs = 0;
4296 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
4297 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
4298 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
4299 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
4300 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
4301 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
4302 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
4303 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
4304 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
4305 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
4306 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
4307 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
4309 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
4310 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
4311 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
4312 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
4313 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
4314 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
4315 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
4316 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
4317 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
4318 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
4319 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
4320 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
4321 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
4322 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
4323 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
4324 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
4325 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
4326 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
4327 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
4328 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
4330 s->reserved_regs = 0;
4331 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
4332 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
4333 #if defined(_CALL_SYSV)
4334 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
4336 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
4337 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
4339 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
4340 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
4341 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
4342 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
4344 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
4351 DebugFrameFDEHeader fde;
4352 uint8_t fde_def_cfa[4];
4353 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
4356 /* We're expecting a 2-byte uleb128-encoded value. */
4357 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
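/*
 * With FRAME_SIZE below 2**14, its uleb128 encoding is exactly two
 * bytes: (FRAME_SIZE & 0x7f) | 0x80 followed by FRAME_SIZE >> 7, which
 * is what the fde_def_cfa initializer below stores.
 */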
4359 #if TCG_TARGET_REG_BITS == 64
4360 # define ELF_HOST_MACHINE EM_PPC64
4362 # define ELF_HOST_MACHINE EM_PPC
4365 static DebugFrame debug_frame = {
4366 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4367 .cie.id = -1,
4368 .cie.version = 1,
4369 .cie.code_align = 1,
4370 .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
4371 .cie.return_column = 65,
4373 /* Total FDE size does not include the "len" member. */
4374 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
4376 .fde_def_cfa = {
4377 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */
4378 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
4379 (FRAME_SIZE >> 7)
4380 },
4381 .fde_reg_ofs = {
4382 /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
4383 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
4384 }
4385 };
4387 void tcg_register_jit(const void *buf, size_t buf_size)
4389 uint8_t *p = &debug_frame.fde_reg_ofs[3];
4392 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
4393 p[0] = 0x80 + tcg_target_callee_save_regs[i];
4394 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
4397 debug_frame.fde.func_start = (uintptr_t)buf;
4398 debug_frame.fde.func_len = buf_size;
4400 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
4402 #endif /* __ELF__ */