2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "../tcg-pool.c.inc"
28 #if defined _CALL_DARWIN || defined __APPLE__
29 #define TCG_TARGET_CALL_DARWIN
32 # define TCG_TARGET_CALL_ALIGN_ARGS 1
35 /* For some memory operations, we need a scratch that isn't R0. For the AIX
36 calling convention, we can re-use the TOC register since we'll be reloading
37 it at every call. Otherwise R12 will do nicely as neither a call-saved
38 register nor a parameter register. */
40 # define TCG_REG_TMP1 TCG_REG_R2
42 # define TCG_REG_TMP1 TCG_REG_R12
45 #define TCG_VEC_TMP1 TCG_REG_V0
46 #define TCG_VEC_TMP2 TCG_REG_V1
48 #define TCG_REG_TB TCG_REG_R31
49 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
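/* TCG_REG_TB caches the address of the current TB, so that in-TB addresses
   and constant pool entries can be formed with cheap reg+offset arithmetic;
   reserving a register for this is only a win on 64-bit hosts, hence the
   USE_REG_TB condition above. */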
51 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
52 #define SZP ((int)sizeof(void *))
54 /* Shorthand for size of a register. */
55 #define SZR (TCG_TARGET_REG_BITS / 8)
57 #define TCG_CT_CONST_S16 0x100
58 #define TCG_CT_CONST_U16 0x200
59 #define TCG_CT_CONST_S32 0x400
60 #define TCG_CT_CONST_U32 0x800
61 #define TCG_CT_CONST_ZERO 0x1000
62 #define TCG_CT_CONST_MONE 0x2000
63 #define TCG_CT_CONST_WSZ 0x4000
65 static tcg_insn_unit *tb_ret_addr;
68 static bool have_isel;
72 #ifndef CONFIG_SOFTMMU
73 #define TCG_GUEST_BASE_REG 30
76 #ifdef CONFIG_DEBUG_TCG
77 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
78 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
80 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
81 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
82 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
83 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
84 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
85 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
89 static const int tcg_target_reg_alloc_order[] = {
90 TCG_REG_R14, /* call saved registers */
108 TCG_REG_R12, /* call clobbered, non-arguments */
112 TCG_REG_R10, /* call clobbered, arguments */
121 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
122 TCG_REG_V2, /* call clobbered, vectors */
142 static const int tcg_target_call_iarg_regs[] = {
153 static const int tcg_target_call_oarg_regs[] = {
158 static const int tcg_target_callee_save_regs[] = {
159 #ifdef TCG_TARGET_CALL_DARWIN
175 TCG_REG_R27, /* currently used for the global env */
182 static inline bool in_range_b(tcg_target_long target)
184 return target == sextract64(target, 0, 26);
187 static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
189 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
190 tcg_debug_assert(in_range_b(disp));
191 return disp & 0x3fffffc;
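/* Illustrative example: a branch to a target 0x1000 bytes ahead has
   disp = 0x1000; since insns are 4-byte aligned the low two bits of any
   displacement are zero, "& 0x3fffffc" extracts the LI field, and
   in_range_b() accepts anything representable in 26 signed bits (+/-32MB). */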
194 static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
196 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
197 if (in_range_b(disp)) {
198 *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
204 static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
206 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
207 tcg_debug_assert(disp == (int16_t) disp);
208 return disp & 0xfffc;
211 static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
213 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
214 if (disp == (int16_t) disp) {
215 *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
221 /* parse target specific constraints */
222 static const char *target_parse_constraint(TCGArgConstraint *ct,
223 const char *ct_str, TCGType type)
226 case 'A': case 'B': case 'C': case 'D':
227 ct->ct |= TCG_CT_REG;
228 tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
231 ct->ct |= TCG_CT_REG;
232 ct->u.regs = 0xffffffff;
235 ct->ct |= TCG_CT_REG;
236 ct->u.regs = 0xffffffff00000000ull;
238 case 'L': /* qemu_ld constraint */
239 ct->ct |= TCG_CT_REG;
240 ct->u.regs = 0xffffffff;
241 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
242 #ifdef CONFIG_SOFTMMU
243 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
244 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
247 case 'S': /* qemu_st constraint */
248 ct->ct |= TCG_CT_REG;
249 ct->u.regs = 0xffffffff;
250 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
251 #ifdef CONFIG_SOFTMMU
252 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
253 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
254 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
258 ct->ct |= TCG_CT_CONST_S16;
261 ct->ct |= TCG_CT_CONST_U16;
264 ct->ct |= TCG_CT_CONST_MONE;
267 ct->ct |= TCG_CT_CONST_S32;
270 ct->ct |= TCG_CT_CONST_U32;
273 ct->ct |= TCG_CT_CONST_WSZ;
276 ct->ct |= TCG_CT_CONST_ZERO;
284 /* test if a constant matches the constraint */
285 static int tcg_target_const_match(tcg_target_long val, TCGType type,
286 const TCGArgConstraint *arg_ct)
289 if (ct & TCG_CT_CONST) {
293 /* The only 32-bit constraint we use aside from
294 TCG_CT_CONST is TCG_CT_CONST_S16. */
295 if (type == TCG_TYPE_I32) {
299 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
301 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
303 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
305 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
307 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
309 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
311 } else if ((ct & TCG_CT_CONST_WSZ)
312 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
318 #define OPCD(opc) ((opc)<<26)
319 #define XO19(opc) (OPCD(19)|((opc)<<1))
320 #define MD30(opc) (OPCD(30)|((opc)<<2))
321 #define MDS30(opc) (OPCD(30)|((opc)<<1))
322 #define XO31(opc) (OPCD(31)|((opc)<<1))
323 #define XO58(opc) (OPCD(58)|(opc))
324 #define XO62(opc) (OPCD(62)|(opc))
325 #define VX4(opc) (OPCD(4)|(opc))
329 #define LBZ OPCD( 34)
330 #define LHZ OPCD( 40)
331 #define LHA OPCD( 42)
332 #define LWZ OPCD( 32)
333 #define LWZUX XO31( 55)
334 #define STB OPCD( 38)
335 #define STH OPCD( 44)
336 #define STW OPCD( 36)
339 #define STDU XO62( 1)
340 #define STDX XO31(149)
343 #define LDX XO31( 21)
345 #define LDUX XO31( 53)
347 #define LWAX XO31(341)
349 #define ADDIC OPCD( 12)
350 #define ADDI OPCD( 14)
351 #define ADDIS OPCD( 15)
352 #define ORI OPCD( 24)
353 #define ORIS OPCD( 25)
354 #define XORI OPCD( 26)
355 #define XORIS OPCD( 27)
356 #define ANDI OPCD( 28)
357 #define ANDIS OPCD( 29)
358 #define MULLI OPCD( 7)
359 #define CMPLI OPCD( 10)
360 #define CMPI OPCD( 11)
361 #define SUBFIC OPCD( 8)
363 #define LWZU OPCD( 33)
364 #define STWU OPCD( 37)
366 #define RLWIMI OPCD( 20)
367 #define RLWINM OPCD( 21)
368 #define RLWNM OPCD( 23)
370 #define RLDICL MD30( 0)
371 #define RLDICR MD30( 1)
372 #define RLDIMI MD30( 3)
373 #define RLDCL MDS30( 8)
375 #define BCLR XO19( 16)
376 #define BCCTR XO19(528)
377 #define CRAND XO19(257)
378 #define CRANDC XO19(129)
379 #define CRNAND XO19(225)
380 #define CROR XO19(449)
381 #define CRNOR XO19( 33)
383 #define EXTSB XO31(954)
384 #define EXTSH XO31(922)
385 #define EXTSW XO31(986)
386 #define ADD XO31(266)
387 #define ADDE XO31(138)
388 #define ADDME XO31(234)
389 #define ADDZE XO31(202)
390 #define ADDC XO31( 10)
391 #define AND XO31( 28)
392 #define SUBF XO31( 40)
393 #define SUBFC XO31( 8)
394 #define SUBFE XO31(136)
395 #define SUBFME XO31(232)
396 #define SUBFZE XO31(200)
398 #define XOR XO31(316)
399 #define MULLW XO31(235)
400 #define MULHW XO31( 75)
401 #define MULHWU XO31( 11)
402 #define DIVW XO31(491)
403 #define DIVWU XO31(459)
405 #define CMPL XO31( 32)
406 #define LHBRX XO31(790)
407 #define LWBRX XO31(534)
408 #define LDBRX XO31(532)
409 #define STHBRX XO31(918)
410 #define STWBRX XO31(662)
411 #define STDBRX XO31(660)
412 #define MFSPR XO31(339)
413 #define MTSPR XO31(467)
414 #define SRAWI XO31(824)
415 #define NEG XO31(104)
416 #define MFCR XO31( 19)
417 #define MFOCRF (MFCR | (1u << 20))
418 #define NOR XO31(124)
419 #define CNTLZW XO31( 26)
420 #define CNTLZD XO31( 58)
421 #define CNTTZW XO31(538)
422 #define CNTTZD XO31(570)
423 #define CNTPOPW XO31(378)
424 #define CNTPOPD XO31(506)
425 #define ANDC XO31( 60)
426 #define ORC XO31(412)
427 #define EQV XO31(284)
428 #define NAND XO31(476)
429 #define ISEL XO31( 15)
431 #define MULLD XO31(233)
432 #define MULHD XO31( 73)
433 #define MULHDU XO31( 9)
434 #define DIVD XO31(489)
435 #define DIVDU XO31(457)
437 #define LBZX XO31( 87)
438 #define LHZX XO31(279)
439 #define LHAX XO31(343)
440 #define LWZX XO31( 23)
441 #define STBX XO31(215)
442 #define STHX XO31(407)
443 #define STWX XO31(151)
445 #define EIEIO XO31(854)
446 #define HWSYNC XO31(598)
447 #define LWSYNC (HWSYNC | (1u << 21))
449 #define SPR(a, b) ((((a)<<5)|(b))<<11)
451 #define CTR SPR(9, 0)
453 #define SLW XO31( 24)
454 #define SRW XO31(536)
455 #define SRAW XO31(792)
457 #define SLD XO31( 27)
458 #define SRD XO31(539)
459 #define SRAD XO31(794)
460 #define SRADI XO31(413<<1)
463 #define TRAP (TW | TO(31))
465 #define NOP ORI /* ori 0,0,0 */
467 #define LVX XO31(103)
468 #define LVEBX XO31(7)
469 #define LVEHX XO31(39)
470 #define LVEWX XO31(71)
471 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
472 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
473 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
474 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
475 #define LXSD (OPCD(57) | 2) /* v3.00 */
476 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
478 #define STVX XO31(231)
479 #define STVEWX XO31(199)
480 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
481 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
482 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
483 #define STXSD (OPCD(61) | 2) /* v3.00 */
485 #define VADDSBS VX4(768)
486 #define VADDUBS VX4(512)
487 #define VADDUBM VX4(0)
488 #define VADDSHS VX4(832)
489 #define VADDUHS VX4(576)
490 #define VADDUHM VX4(64)
491 #define VADDSWS VX4(896)
492 #define VADDUWS VX4(640)
493 #define VADDUWM VX4(128)
494 #define VADDUDM VX4(192) /* v2.07 */
496 #define VSUBSBS VX4(1792)
497 #define VSUBUBS VX4(1536)
498 #define VSUBUBM VX4(1024)
499 #define VSUBSHS VX4(1856)
500 #define VSUBUHS VX4(1600)
501 #define VSUBUHM VX4(1088)
502 #define VSUBSWS VX4(1920)
503 #define VSUBUWS VX4(1664)
504 #define VSUBUWM VX4(1152)
505 #define VSUBUDM VX4(1216) /* v2.07 */
507 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
508 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
510 #define VMAXSB VX4(258)
511 #define VMAXSH VX4(322)
512 #define VMAXSW VX4(386)
513 #define VMAXSD VX4(450) /* v2.07 */
514 #define VMAXUB VX4(2)
515 #define VMAXUH VX4(66)
516 #define VMAXUW VX4(130)
517 #define VMAXUD VX4(194) /* v2.07 */
518 #define VMINSB VX4(770)
519 #define VMINSH VX4(834)
520 #define VMINSW VX4(898)
521 #define VMINSD VX4(962) /* v2.07 */
522 #define VMINUB VX4(514)
523 #define VMINUH VX4(578)
524 #define VMINUW VX4(642)
525 #define VMINUD VX4(706) /* v2.07 */
527 #define VCMPEQUB VX4(6)
528 #define VCMPEQUH VX4(70)
529 #define VCMPEQUW VX4(134)
530 #define VCMPEQUD VX4(199) /* v2.07 */
531 #define VCMPGTSB VX4(774)
532 #define VCMPGTSH VX4(838)
533 #define VCMPGTSW VX4(902)
534 #define VCMPGTSD VX4(967) /* v2.07 */
535 #define VCMPGTUB VX4(518)
536 #define VCMPGTUH VX4(582)
537 #define VCMPGTUW VX4(646)
538 #define VCMPGTUD VX4(711) /* v2.07 */
539 #define VCMPNEB VX4(7) /* v3.00 */
540 #define VCMPNEH VX4(71) /* v3.00 */
541 #define VCMPNEW VX4(135) /* v3.00 */
543 #define VSLB VX4(260)
544 #define VSLH VX4(324)
545 #define VSLW VX4(388)
546 #define VSLD VX4(1476) /* v2.07 */
547 #define VSRB VX4(516)
548 #define VSRH VX4(580)
549 #define VSRW VX4(644)
550 #define VSRD VX4(1732) /* v2.07 */
551 #define VSRAB VX4(772)
552 #define VSRAH VX4(836)
553 #define VSRAW VX4(900)
554 #define VSRAD VX4(964) /* v2.07 */
557 #define VRLW VX4(132)
558 #define VRLD VX4(196) /* v2.07 */
560 #define VMULEUB VX4(520)
561 #define VMULEUH VX4(584)
562 #define VMULEUW VX4(648) /* v2.07 */
563 #define VMULOUB VX4(8)
564 #define VMULOUH VX4(72)
565 #define VMULOUW VX4(136) /* v2.07 */
566 #define VMULUWM VX4(137) /* v2.07 */
567 #define VMULLD VX4(457) /* v3.10 */
568 #define VMSUMUHM VX4(38)
570 #define VMRGHB VX4(12)
571 #define VMRGHH VX4(76)
572 #define VMRGHW VX4(140)
573 #define VMRGLB VX4(268)
574 #define VMRGLH VX4(332)
575 #define VMRGLW VX4(396)
577 #define VPKUHUM VX4(14)
578 #define VPKUWUM VX4(78)
580 #define VAND VX4(1028)
581 #define VANDC VX4(1092)
582 #define VNOR VX4(1284)
583 #define VOR VX4(1156)
584 #define VXOR VX4(1220)
585 #define VEQV VX4(1668) /* v2.07 */
586 #define VNAND VX4(1412) /* v2.07 */
587 #define VORC VX4(1348) /* v2.07 */
589 #define VSPLTB VX4(524)
590 #define VSPLTH VX4(588)
591 #define VSPLTW VX4(652)
592 #define VSPLTISB VX4(780)
593 #define VSPLTISH VX4(844)
594 #define VSPLTISW VX4(908)
596 #define VSLDOI VX4(44)
598 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
599 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
600 #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
602 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
603 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
604 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
605 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
606 #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
607 #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
609 #define RT(r) ((r)<<21)
610 #define RS(r) ((r)<<21)
611 #define RA(r) ((r)<<16)
612 #define RB(r) ((r)<<11)
613 #define TO(t) ((t)<<21)
614 #define SH(s) ((s)<<11)
615 #define MB(b) ((b)<<6)
616 #define ME(e) ((e)<<1)
617 #define BO(o) ((o)<<21)
618 #define MB64(b) ((b)<<5)
619 #define FXM(b) (1 << (19 - (b)))
621 #define VRT(r) (((r) & 31) << 21)
622 #define VRA(r) (((r) & 31) << 16)
623 #define VRB(r) (((r) & 31) << 11)
624 #define VRC(r) (((r) & 31) << 6)
628 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
629 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
630 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
631 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
633 #define BF(n) ((n)<<23)
634 #define BI(n, c) (((c)+((n)*4))<<16)
635 #define BT(n, c) (((c)+((n)*4))<<21)
636 #define BA(n, c) (((c)+((n)*4))<<16)
637 #define BB(n, c) (((c)+((n)*4))<<11)
638 #define BC_(n, c) (((c)+((n)*4))<<6)
640 #define BO_COND_TRUE BO(12)
641 #define BO_COND_FALSE BO( 4)
642 #define BO_ALWAYS BO(20)
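/* Standard Power ISA BO encodings: BO(12) branches when the tested CR bit
   is 1, BO(4) when it is 0, and BO(20) branches unconditionally. */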
651 static const uint32_t tcg_to_bc[] = {
652 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
653 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
654 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
655 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
656 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
657 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
658 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
659 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
660 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
661 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
664 /* The low bit here is set if the RA and RB fields must be inverted. */
665 static const uint32_t tcg_to_isel[] = {
666 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
667 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
668 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
669 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
670 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
671 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
672 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
673 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
674 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
675 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
678 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
679 intptr_t value, intptr_t addend)
681 tcg_insn_unit *target;
686 target = (tcg_insn_unit *)value;
690 return reloc_pc14(code_ptr, target);
692 return reloc_pc24(code_ptr, target);
695 * We are (slightly) abusing this relocation type. In particular,
696 * assert that the low 2 bits are zero, and do not modify them.
697 * That way we can use this with LD et al that have opcode bits
698 * in the low 2 bits of the insn.
700 if ((value & 3) || value != (int16_t)value) {
703 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
707 * We are abusing this relocation type. Again, this points to
708 * a pair of insns, lis + load. This is an absolute address
709 * relocation for PPC32 so the lis cannot be removed.
713 if (hi + lo != value) {
716 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
717 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
720 g_assert_not_reached();
725 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
726 TCGReg base, tcg_target_long offset);
728 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
735 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
738 if (ret < TCG_REG_V0) {
739 if (arg < TCG_REG_V0) {
740 tcg_out32(s, OR | SAB(arg, ret, arg));
742 } else if (have_isa_2_07) {
743 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
744 | VRT(arg) | RA(ret));
747 /* Altivec does not support vector->integer moves. */
750 } else if (arg < TCG_REG_V0) {
752 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
753 | VRT(ret) | RA(arg));
756 /* Altivec does not support integer->vector moves. */
763 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
764 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
767 g_assert_not_reached();
772 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
775 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
776 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
777 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
778 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
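/* Worked example (illustrative): MD-form insns hold a 6-bit shift as the
   usual 5-bit SH field plus sh[5] at insn bit 1, so sh = 40 becomes
   SH(8) | 2; the 6-bit mask bound is likewise stored rotated, with mb[5]
   in the low bit of the MB64 field. */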
781 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
782 int sh, int mb, int me)
784 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
787 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
789 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
792 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
794 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
797 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
799 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
802 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
804 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
807 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
809 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
812 /* Emit a move into ret of arg, if it can be done in one insn. */
813 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
815 if (arg == (int16_t)arg) {
816 tcg_out32(s, ADDI | TAI(ret, 0, arg));
819 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
820 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
826 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
827 tcg_target_long arg, bool in_prologue)
833 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
835 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
839 /* Load 16-bit immediates with one insn. */
840 if (tcg_out_movi_one(s, ret, arg)) {
844 /* Load addresses within the TB with one insn. */
845 tb_diff = arg - (intptr_t)s->code_gen_ptr;
846 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
847 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
851 /* Load 32-bit immediates with two insns. Note that we've already
852 eliminated bare ADDIS, so we know both insns are required. */
853 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
854 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
855 tcg_out32(s, ORI | SAI(ret, ret, arg));
858 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
859 tcg_out32(s, ADDI | TAI(ret, 0, arg));
860 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
864 /* Load masked 16-bit value. */
865 if (arg > 0 && (arg & 0x8000)) {
867 if ((tmp & (tmp + 1)) == 0) {
868 int mb = clz64(tmp + 1) + 1;
869 tcg_out32(s, ADDI | TAI(ret, 0, arg));
870 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
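/* Worked example (illustrative): arg = 0x9234. The (elided) tmp is
   arg | 0x7fff = 0xffff, one less than a power of two, so
   mb = clz64(0x10000) + 1 = 48; ADDI materializes the sign-extended
   0xffffffffffff9234 and RLDICL with mb = 48 keeps only the low
   16 bits, leaving 0x9234. */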
875 /* Load common masks with 2 insns. */
878 if (tmp == (int16_t)tmp) {
879 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
880 tcg_out_shli64(s, ret, ret, shift);
884 if (tcg_out_movi_one(s, ret, arg << shift)) {
885 tcg_out_shri64(s, ret, ret, shift);
889 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
890 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
891 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
895 /* Use the constant pool, if possible. */
896 if (!in_prologue && USE_REG_TB) {
897 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
898 -(intptr_t)s->code_gen_ptr);
899 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
903 tmp = arg >> 31 >> 1;
904 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
906 tcg_out_shli64(s, ret, ret, 32);
908 if (arg & 0xffff0000) {
909 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
912 tcg_out32(s, ORI | SAI(ret, ret, arg));
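/* Worked example (illustrative): arg = 0x123456789abcdef0 takes the full
   five-insn sequence: the high half 0x12345678 is built with ADDIS+ORI,
   shifted left 32, then ORIS 0x9abc and ORI 0xdef0 fill in the low half. */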
916 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
924 if (low >= -16 && low < 16) {
925 if (val == (tcg_target_long)dup_const(MO_8, low)) {
926 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
929 if (val == (tcg_target_long)dup_const(MO_16, low)) {
930 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
933 if (val == (tcg_target_long)dup_const(MO_32, low)) {
934 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
938 if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) {
939 tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
944 * Otherwise we must load the value from the constant pool.
948 add = -(intptr_t)s->code_gen_ptr;
955 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
956 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
957 if (TCG_TARGET_REG_BITS == 64) {
958 new_pool_label(s, val, rel, s->code_ptr, add);
960 new_pool_l2(s, rel, s->code_ptr, add, val, val);
963 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
964 if (TCG_TARGET_REG_BITS == 64) {
965 new_pool_l2(s, rel, s->code_ptr, add, val, val);
967 new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
972 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
973 load_insn |= RA(TCG_REG_TB);
975 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
976 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
978 tcg_out32(s, load_insn);
981 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
987 tcg_debug_assert(ret < TCG_REG_V0);
988 tcg_out_movi_int(s, type, ret, arg, false);
993 tcg_debug_assert(ret >= TCG_REG_V0);
994 tcg_out_dupi_vec(s, type, ret, arg);
998 g_assert_not_reached();
1002 static bool mask_operand(uint32_t c, int *mb, int *me)
1006 /* Accept a bit pattern like:
1010 Keep track of the transitions. */
1011 if (c == 0 || c == -1) {
1017 if (test & (test - 1)) {
1022 *mb = test ? clz32(test & -test) + 1 : 0;
1026 static bool mask64_operand(uint64_t c, int *mb, int *me)
1035 /* Accept 1..10..0. */
1041 /* Accept 0..01..1. */
1042 if (lsb == 1 && (c & (c + 1)) == 0) {
1043 *mb = clz64(c + 1) + 1;
1050 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1054 if (mask_operand(c, &mb, &me)) {
1055 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1056 } else if ((c & 0xffff) == c) {
1057 tcg_out32(s, ANDI | SAI(src, dst, c));
1059 } else if ((c & 0xffff0000) == c) {
1060 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1063 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1064 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1068 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1072 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1073 if (mask64_operand(c, &mb, &me)) {
1075 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1077 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1079 } else if ((c & 0xffff) == c) {
1080 tcg_out32(s, ANDI | SAI(src, dst, c));
1082 } else if ((c & 0xffff0000) == c) {
1083 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1086 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1087 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
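/* Worked example (illustrative): c = 0xffffffff00000000 has the 1..10..0
   form, so mask64_operand() yields mb = 0, me = 31 and one RLDICR
   suffices; c = 0x00000000ffffffff yields mb = 32 and one RLDICL. */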
1091 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1092 int op_lo, int op_hi)
1095 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1099 tcg_out32(s, op_lo | SAI(src, dst, c));
1104 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1106 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1109 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1111 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1114 static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
1116 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1117 if (in_range_b(disp)) {
1118 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1120 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1121 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1122 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1126 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1127 TCGReg base, tcg_target_long offset)
1129 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1130 bool is_int_store = false;
1131 TCGReg rs = TCG_REG_TMP1;
1138 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1154 case STB: case STH: case STW:
1155 is_int_store = true;
1159 /* For unaligned, or very large offsets, use the indexed form. */
1160 if (offset & align || offset != (int32_t)offset || opi == 0) {
1164 tcg_debug_assert(!is_int_store || rs != rt);
1165 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1166 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1170 l0 = (int16_t)offset;
1171 offset = (offset - l0) >> 16;
1172 l1 = (int16_t)offset;
1174 if (l1 < 0 && orig >= 0) {
1176 l1 = (int16_t)(offset - 0x4000);
1179 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1183 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1186 if (opi != ADDI || base != rt || l0 != 0) {
1187 tcg_out32(s, opi | TAI(rt & 31, base, l0));
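/* Worked example (illustrative): offset = 0x12345678 splits into
   l1 = 0x1234 and l0 = 0x5678, emitting ADDIS rs,base,0x1234 followed by
   the D-form op with displacement 0x5678. The 0x4000 fixup above handles
   positive offsets such as 0x7fff8000 whose high part would otherwise
   sign-overflow: two ADDIS of 0x4000 each supply the required 0x80000000. */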
1191 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1192 TCGReg va, TCGReg vb, int shb)
1194 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1197 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1198 TCGReg base, intptr_t offset)
1204 if (ret < TCG_REG_V0) {
1205 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1208 if (have_isa_2_07 && have_vsx) {
1209 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1212 tcg_debug_assert((offset & 3) == 0);
1213 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1214 shift = (offset - 4) & 0xc;
1216 tcg_out_vsldoi(s, ret, ret, ret, shift);
1220 if (ret < TCG_REG_V0) {
1221 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1222 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1227 tcg_debug_assert(ret >= TCG_REG_V0);
1229 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1233 tcg_debug_assert((offset & 7) == 0);
1234 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1236 tcg_out_vsldoi(s, ret, ret, ret, 8);
1240 tcg_debug_assert(ret >= TCG_REG_V0);
1241 tcg_debug_assert((offset & 15) == 0);
1242 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1243 LVX, ret, base, offset);
1246 g_assert_not_reached();
1250 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1251 TCGReg base, intptr_t offset)
1257 if (arg < TCG_REG_V0) {
1258 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1261 if (have_isa_2_07 && have_vsx) {
1262 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1266 tcg_debug_assert((offset & 3) == 0);
1267 shift = (offset - 4) & 0xc;
1269 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1272 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1275 if (arg < TCG_REG_V0) {
1276 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1277 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1282 tcg_debug_assert(arg >= TCG_REG_V0);
1284 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1285 STXSDX, arg, base, offset);
1288 tcg_debug_assert((offset & 7) == 0);
1290 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1293 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1294 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1297 tcg_debug_assert(arg >= TCG_REG_V0);
1298 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1299 STVX, arg, base, offset);
1302 g_assert_not_reached();
1306 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1307 TCGReg base, intptr_t ofs)
1312 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1313 int const_arg2, int cr, TCGType type)
1318 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1320 /* Simplify the comparisons below wrt CMPI. */
1321 if (type == TCG_TYPE_I32) {
1322 arg2 = (int32_t)arg2;
1329 if ((int16_t) arg2 == arg2) {
1333 } else if ((uint16_t) arg2 == arg2) {
1348 if ((int16_t) arg2 == arg2) {
1363 if ((uint16_t) arg2 == arg2) {
1376 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1379 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1382 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1385 tcg_out32(s, op | RA(arg1) | RB(arg2));
1389 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1390 TCGReg dst, TCGReg src)
1392 if (type == TCG_TYPE_I32) {
1393 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1394 tcg_out_shri32(s, dst, dst, 5);
1396 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1397 tcg_out_shri64(s, dst, dst, 6);
1401 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1403 /* X != 0 implies X + -1 generates a carry. SUBFE then computes
1404 R = X + ~(X-1) + CA = X - X + CA = CA. */
1406 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1407 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1409 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1410 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
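/* Worked example (illustrative): src = 5: ADDIC writes 4 and sets CA = 1,
   then SUBFE computes ~4 + 5 + 1 = 1. src = 0: ADDIC writes -1 with
   CA = 0, then SUBFE computes ~(-1) + 0 + 0 = 0. Either way the result
   is (src != 0). */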
1414 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1418 if ((uint32_t)arg2 == arg2) {
1419 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1421 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1422 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1425 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1430 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1431 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1436 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1438 /* Ignore high bits of a potential constant arg2. */
1439 if (type == TCG_TYPE_I32) {
1440 arg2 = (uint32_t)arg2;
1443 /* Handle common and trivial cases before handling anything else. */
1447 tcg_out_setcond_eq0(s, type, arg0, arg1);
1450 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1451 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1454 tcg_out_setcond_ne0(s, arg0, arg1);
1457 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1461 /* Extract the sign bit. */
1462 if (type == TCG_TYPE_I32) {
1463 tcg_out_shri32(s, arg0, arg1, 31);
1465 tcg_out_shri64(s, arg0, arg1, 63);
1473 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1474 All other cases below are also at least 3 insns, so speed up the
1475 code generator by not considering them and always using ISEL. */
1479 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1481 isel = tcg_to_isel[cond];
1483 tcg_out_movi(s, type, arg0, 1);
1485 /* arg0 = (bc ? 0 : 1) */
1486 tab = TAB(arg0, 0, arg0);
1489 /* arg0 = (bc ? 1 : 0) */
1490 tcg_out_movi(s, type, TCG_REG_R0, 0);
1491 tab = TAB(arg0, arg0, TCG_REG_R0);
1493 tcg_out32(s, isel | tab);
1499 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1500 tcg_out_setcond_eq0(s, type, arg0, arg1);
1504 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1505 /* Discard the high bits only once, rather than both inputs. */
1506 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1507 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1510 tcg_out_setcond_ne0(s, arg0, arg1);
1528 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1534 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1536 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1540 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1541 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1549 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1552 bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
1554 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1559 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1560 TCGArg arg1, TCGArg arg2, int const_arg2,
1561 TCGLabel *l, TCGType type)
1563 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1564 tcg_out_bc(s, tcg_to_bc[cond], l);
1567 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1568 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1569 TCGArg v2, bool const_c2)
1571 /* If for some reason both inputs are zero, don't produce bad code. */
1572 if (v1 == 0 && v2 == 0) {
1573 tcg_out_movi(s, type, dest, 0);
1577 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1580 int isel = tcg_to_isel[cond];
1582 /* Swap the V operands if the operation indicates inversion. */
1589 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1591 tcg_out_movi(s, type, TCG_REG_R0, 0);
1593 tcg_out32(s, isel | TAB(dest, v1, v2));
1596 cond = tcg_invert_cond(cond);
1598 } else if (dest != v1) {
1600 tcg_out_movi(s, type, dest, 0);
1602 tcg_out_mov(s, type, dest, v1);
1605 /* Branch forward over one insn */
1606 tcg_out32(s, tcg_to_bc[cond] | 8);
1608 tcg_out_movi(s, type, dest, 0);
1610 tcg_out_mov(s, type, dest, v2);
1615 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1616 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1618 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1619 tcg_out32(s, opc | RA(a0) | RS(a1));
1621 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1622 /* Note that the only other valid constant for a2 is 0. */
1624 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1625 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1626 } else if (!const_a2 && a0 == a2) {
1627 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1628 tcg_out32(s, opc | RA(a0) | RS(a1));
1630 tcg_out32(s, opc | RA(a0) | RS(a1));
1631 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1633 tcg_out_movi(s, type, a0, 0);
1635 tcg_out_mov(s, type, a0, a2);
1641 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1642 const int *const_args)
1644 static const struct { uint8_t bit1, bit2; } bits[] = {
1645 [TCG_COND_LT ] = { CR_LT, CR_LT },
1646 [TCG_COND_LE ] = { CR_LT, CR_GT },
1647 [TCG_COND_GT ] = { CR_GT, CR_GT },
1648 [TCG_COND_GE ] = { CR_GT, CR_LT },
1649 [TCG_COND_LTU] = { CR_LT, CR_LT },
1650 [TCG_COND_LEU] = { CR_LT, CR_GT },
1651 [TCG_COND_GTU] = { CR_GT, CR_GT },
1652 [TCG_COND_GEU] = { CR_GT, CR_LT },
1655 TCGCond cond = args[4], cond2;
1656 TCGArg al, ah, bl, bh;
1657 int blconst, bhconst;
1664 blconst = const_args[2];
1665 bhconst = const_args[3];
1674 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1675 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1676 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1687 bit1 = bits[cond].bit1;
1688 bit2 = bits[cond].bit2;
1689 op = (bit1 != bit2 ? CRANDC : CRAND);
1690 cond2 = tcg_unsigned_cond(cond);
1692 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1693 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1694 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1695 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1703 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1704 const int *const_args)
1706 tcg_out_cmp2(s, args + 1, const_args + 1);
1707 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1708 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1711 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1712 const int *const_args)
1714 tcg_out_cmp2(s, args, const_args);
1715 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1718 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1720 uint32_t insn = HWSYNC;
1722 if (a0 == TCG_MO_LD_LD) {
1724 } else if (a0 == TCG_MO_ST_ST) {
1730 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
1733 if (TCG_TARGET_REG_BITS == 64) {
1734 tcg_insn_unit i1, i2;
1735 intptr_t tb_diff = addr - tc_ptr;
1736 intptr_t br_diff = addr - (jmp_addr + 4);
1739 /* A 16-bit tb_diff says nothing about the branch displacement being
1740 in range, but it does let us reload TCG_REG_TB with a single ADDI,
1741 and that short form applies quite often. */
1742 if (tb_diff == (int16_t)tb_diff) {
1743 i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
1744 i2 = B | (br_diff & 0x3fffffc);
1746 intptr_t lo = (int16_t)tb_diff;
1747 intptr_t hi = (int32_t)(tb_diff - lo);
1748 assert(tb_diff == hi + lo);
1749 i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
1750 i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
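/* Worked example (illustrative): tb_diff = 0x1234abcd gives
   lo = (int16_t)0xabcd = -0x5433 and hi = 0x12350000, so ADDIS 0x1235
   followed by ADDI -0x5433 reconstructs the full displacement. */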
1752 #ifdef HOST_WORDS_BIGENDIAN
1753 pair = (uint64_t)i1 << 32 | i2;
1755 pair = (uint64_t)i2 << 32 | i1;
1758 /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
1759 within atomic_set that would fail to build a ppc32 host. */
1760 atomic_set__nocheck((uint64_t *)jmp_addr, pair);
1761 flush_icache_range(jmp_addr, jmp_addr + 8);
1763 intptr_t diff = addr - jmp_addr;
1764 tcg_debug_assert(in_range_b(diff));
1765 atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
1766 flush_icache_range(jmp_addr, jmp_addr + 4);
1770 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
1773 /* Look through the function descriptor. If the branch is in range and
1774 the TOC value is cheap to materialize, we can branch to the code directly. */
1775 void *tgt = ((void **)target)[0];
1776 uintptr_t toc = ((uintptr_t *)target)[1];
1777 intptr_t diff = tcg_pcrel_diff(s, tgt);
1779 if (in_range_b(diff) && toc == (uint32_t)toc) {
1780 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1781 tcg_out_b(s, LK, tgt);
1783 /* Fold the low bits of the constant into the addresses below. */
1784 intptr_t arg = (intptr_t)target;
1785 int ofs = (int16_t)arg;
1787 if (ofs + 8 < 0x8000) {
1792 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1793 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1794 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1795 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1796 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1798 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1801 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1802 address, which the callee uses to compute its TOC address. */
1803 /* FIXME: when the branch is in range, we could avoid r12 load if we
1804 knew that the destination uses the same TOC, and what its local
1805 entry point offset is. */
1806 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1808 diff = tcg_pcrel_diff(s, target);
1809 if (in_range_b(diff)) {
1810 tcg_out_b(s, LK, target);
1812 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1813 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1816 tcg_out_b(s, LK, target);
1820 static const uint32_t qemu_ldx_opc[16] = {
1827 [MO_BSWAP | MO_UB] = LBZX,
1828 [MO_BSWAP | MO_UW] = LHBRX,
1829 [MO_BSWAP | MO_UL] = LWBRX,
1830 [MO_BSWAP | MO_Q] = LDBRX,
1833 static const uint32_t qemu_stx_opc[16] = {
1838 [MO_BSWAP | MO_UB] = STBX,
1839 [MO_BSWAP | MO_UW] = STHBRX,
1840 [MO_BSWAP | MO_UL] = STWBRX,
1841 [MO_BSWAP | MO_Q] = STDBRX,
1844 static const uint32_t qemu_exts_opc[4] = {
1845 EXTSB, EXTSH, EXTSW, 0
1848 #if defined (CONFIG_SOFTMMU)
1849 #include "../tcg-ldst.c.inc"
1851 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1852 * int mmu_idx, uintptr_t ra)
1854 static void * const qemu_ld_helpers[16] = {
1855 [MO_UB] = helper_ret_ldub_mmu,
1856 [MO_LEUW] = helper_le_lduw_mmu,
1857 [MO_LEUL] = helper_le_ldul_mmu,
1858 [MO_LEQ] = helper_le_ldq_mmu,
1859 [MO_BEUW] = helper_be_lduw_mmu,
1860 [MO_BEUL] = helper_be_ldul_mmu,
1861 [MO_BEQ] = helper_be_ldq_mmu,
1864 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1865 * uintxx_t val, int mmu_idx, uintptr_t ra)
1867 static void * const qemu_st_helpers[16] = {
1868 [MO_UB] = helper_ret_stb_mmu,
1869 [MO_LEUW] = helper_le_stw_mmu,
1870 [MO_LEUL] = helper_le_stl_mmu,
1871 [MO_LEQ] = helper_le_stq_mmu,
1872 [MO_BEUW] = helper_be_stw_mmu,
1873 [MO_BEUL] = helper_be_stl_mmu,
1874 [MO_BEQ] = helper_be_stq_mmu,
1877 /* We expect to use a 16-bit negative offset from ENV. */
1878 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1879 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1881 /* Perform the TLB load and compare. Places the result of the comparison
1882 in CR7, loads the addend of the TLB into R3, and returns the register
1883 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1885 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1886 TCGReg addrlo, TCGReg addrhi,
1887 int mem_index, bool is_read)
1891 ? offsetof(CPUTLBEntry, addr_read)
1892 : offsetof(CPUTLBEntry, addr_write));
1893 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1894 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1895 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1896 unsigned s_bits = opc & MO_SIZE;
1897 unsigned a_bits = get_alignment_bits(opc);
1899 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1900 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
1901 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
1903 /* Extract the page index, shifted into place for tlb index. */
1904 if (TCG_TARGET_REG_BITS == 32) {
1905 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
1906 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1908 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
1909 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1911 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
1913 /* Load the TLB comparator. */
1914 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
1915 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
1917 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
1919 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
1920 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1921 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
1922 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
1924 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
1928 /* Load the TLB addend for use on the fast path. Do this asap
1929 to minimize any load use delay. */
1930 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
1931 offsetof(CPUTLBEntry, addend));
1933 /* Clear the non-page, non-alignment bits from the address */
1934 if (TCG_TARGET_REG_BITS == 32) {
1935 /* We don't support unaligned accesses on 32-bits.
1936 * Preserve the bottom bits and thus trigger a comparison
1937 * failure on unaligned accesses.
1939 if (a_bits < s_bits) {
1942 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
1943 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1947 /* If the access is unaligned, we need to make sure we fail if we
1948 * cross a page boundary. The trick is to add the access size-1
1949 * to the address before masking the low bits. That will make the
1950 * address overflow to the next page if we cross a page boundary,
1951 * which will then force a mismatch of the TLB compare.
1953 if (a_bits < s_bits) {
1954 unsigned a_mask = (1 << a_bits) - 1;
1955 unsigned s_mask = (1 << s_bits) - 1;
1956 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
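/* Worked example (illustrative): an 8-byte access with a_bits = 0 adds 7
   to the address; if the access would cross a page boundary the addition
   carries into the page number, the masked compare below mismatches, and
   the straddling access is handled on the slow path. */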
1960 /* Mask the address for the requested alignment. */
1961 if (TARGET_LONG_BITS == 32) {
1962 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
1963 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1964 /* Zero-extend the address for use in the final address. */
1965 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
1966 addrlo = TCG_REG_R4;
1967 } else if (a_bits == 0) {
1968 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
1970 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
1971 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
1972 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
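/* E.g. with TARGET_PAGE_BITS = 12 and a_bits = 2, the two rotates clear
   in-page bits 2..11 while preserving bits 0..1, so a misaligned address
   fails the TLB compare and takes the slow path. */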
1976 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1977 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1978 0, 7, TCG_TYPE_I32);
1979 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
1980 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1982 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1989 /* Record the context of a call to the out of line helper code for the slow
1990 path for a load or store, so that we can later generate the correct
1992 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1993 TCGReg datalo_reg, TCGReg datahi_reg,
1994 TCGReg addrlo_reg, TCGReg addrhi_reg,
1995 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
1997 TCGLabelQemuLdst *label = new_ldst_label(s);
1999 label->is_ld = is_ld;
2001 label->datalo_reg = datalo_reg;
2002 label->datahi_reg = datahi_reg;
2003 label->addrlo_reg = addrlo_reg;
2004 label->addrhi_reg = addrhi_reg;
2005 label->raddr = raddr;
2006 label->label_ptr[0] = lptr;
2009 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2011 TCGMemOpIdx oi = lb->oi;
2012 MemOp opc = get_memop(oi);
2013 TCGReg hi, lo, arg = TCG_REG_R3;
2015 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
2019 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2021 lo = lb->addrlo_reg;
2022 hi = lb->addrhi_reg;
2023 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2024 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2027 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2028 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2030 /* If the address needed to be zero-extended, we'll have already
2031 placed it in R4. The only remaining case is 64-bit guest. */
2032 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2035 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2036 tcg_out32(s, MFSPR | RT(arg) | LR);
2038 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2040 lo = lb->datalo_reg;
2041 hi = lb->datahi_reg;
2042 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2043 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
2044 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
2045 } else if (opc & MO_SIGN) {
2046 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
2047 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
2049 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
2052 tcg_out_b(s, 0, lb->raddr);
2056 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2058 TCGMemOpIdx oi = lb->oi;
2059 MemOp opc = get_memop(oi);
2060 MemOp s_bits = opc & MO_SIZE;
2061 TCGReg hi, lo, arg = TCG_REG_R3;
2063 if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
2067 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2069 lo = lb->addrlo_reg;
2070 hi = lb->addrhi_reg;
2071 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2072 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2075 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2076 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2078 /* If the address needed to be zero-extended, we'll have already
2079 placed it in R4. The only remaining case is 64-bit guest. */
2080 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2083 lo = lb->datalo_reg;
2084 hi = lb->datahi_reg;
2085 if (TCG_TARGET_REG_BITS == 32) {
2088 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2091 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2094 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2097 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
2101 if (s_bits == MO_64) {
2102 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
2104 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
2108 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2109 tcg_out32(s, MFSPR | RT(arg) | LR);
2111 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2113 tcg_out_b(s, 0, lb->raddr);
2116 #endif /* SOFTMMU */
2118 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2120 TCGReg datalo, datahi, addrlo, rbase;
2121 TCGReg addrhi __attribute__((unused));
2124 #ifdef CONFIG_SOFTMMU
2126 tcg_insn_unit *label_ptr;
2130 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2132 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2134 opc = get_memop(oi);
2135 s_bits = opc & MO_SIZE;
2137 #ifdef CONFIG_SOFTMMU
2138 mem_index = get_mmuidx(oi);
2139 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2141 /* On a TLB miss, branch-and-link to the slow path; LR then points into the current opcode, for use as the helper's return address. */
2142 label_ptr = s->code_ptr;
2143 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2146 #else /* !CONFIG_SOFTMMU */
2147 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2148 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2149 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2150 addrlo = TCG_REG_TMP1;
2154 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2155 if (opc & MO_BSWAP) {
2156 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2157 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2158 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
2159 } else if (rbase != 0) {
2160 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2161 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
2162 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
2163 } else if (addrlo == datahi) {
2164 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2165 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2167 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2168 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2171 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2172 if (!have_isa_2_06 && insn == LDBRX) {
2173 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2174 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2175 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2176 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
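/* Illustrative: the two LWBRX ops fetch and byte-reverse each 32-bit
   half; RLDIMI with sh = 32 then inserts R0's low word into the high
   half of datalo, assembling the byte-reversed 64-bit value. */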
2178 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2180 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2181 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2182 insn = qemu_exts_opc[s_bits];
2183 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2187 #ifdef CONFIG_SOFTMMU
2188 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2189 s->code_ptr, label_ptr);
2193 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2195 TCGReg datalo, datahi, addrlo, rbase;
2196 TCGReg addrhi __attribute__((unused));
2199 #ifdef CONFIG_SOFTMMU
2201 tcg_insn_unit *label_ptr;
2205 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2207 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2209 opc = get_memop(oi);
2210 s_bits = opc & MO_SIZE;
2212 #ifdef CONFIG_SOFTMMU
2213 mem_index = get_mmuidx(oi);
2214 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2216 /* On a TLB miss, branch-and-link to the slow path; LR then points into the current opcode, for use as the helper's return address. */
2217 label_ptr = s->code_ptr;
2218 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2221 #else /* !CONFIG_SOFTMMU */
2222 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2223 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2224 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2225 addrlo = TCG_REG_TMP1;
2229 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2230 if (opc & MO_BSWAP) {
2231 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2232 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2233 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2234 } else if (rbase != 0) {
2235 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2236 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2237 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2239 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2240 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2243 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2244 if (!have_isa_2_06 && insn == STDBRX) {
2245 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2246 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2247 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2248 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2250 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2254 #ifdef CONFIG_SOFTMMU
2255 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2256 s->code_ptr, label_ptr);
2260 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2263 for (i = 0; i < count; ++i) {
2268 /* Parameters for function call generation, used in tcg.c. */
2269 #define TCG_TARGET_STACK_ALIGN 16
2270 #define TCG_TARGET_EXTEND_ARGS 1
2273 # define LINK_AREA_SIZE (6 * SZR)
2274 # define LR_OFFSET (1 * SZR)
2275 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2276 #elif defined(TCG_TARGET_CALL_DARWIN)
2277 # define LINK_AREA_SIZE (6 * SZR)
2278 # define LR_OFFSET (2 * SZR)
2279 #elif TCG_TARGET_REG_BITS == 64
2280 # if defined(_CALL_ELF) && _CALL_ELF == 2
2281 # define LINK_AREA_SIZE (4 * SZR)
2282 # define LR_OFFSET (1 * SZR)
2284 #else /* TCG_TARGET_REG_BITS == 32 */
2285 # if defined(_CALL_SYSV)
2286 # define LINK_AREA_SIZE (2 * SZR)
2287 # define LR_OFFSET (1 * SZR)
2291 # error "Unhandled abi"
2293 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2294 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2297 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2298 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2300 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2301 + TCG_STATIC_CALL_ARGS_SIZE \
2302 + CPU_TEMP_BUF_SIZE \
2304 + TCG_TARGET_STACK_ALIGN - 1) \
2305 & -TCG_TARGET_STACK_ALIGN)
2307 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
2309 static void tcg_target_qemu_prologue(TCGContext *s)
2314 void **desc = (void **)s->code_ptr;
2315 desc[0] = desc + 2; /* entry point */
2316 desc[1] = 0; /* environment pointer */
2317 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2320 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2324 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2325 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2326 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2328 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2329 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2330 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2332 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2334 #ifndef CONFIG_SOFTMMU
2336 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2337 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2341 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2342 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2344 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2346 tcg_out32(s, BCCTR | BO_ALWAYS);
2349 s->code_gen_epilogue = tb_ret_addr = s->code_ptr;
2351 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2352 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2353 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2354 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2356 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2357 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2358 tcg_out32(s, BCLR | BO_ALWAYS);
2361 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
2362 const int *const_args)
2368 case INDEX_op_exit_tb:
2369 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
2370 tcg_out_b(s, 0, tb_ret_addr);
2372 case INDEX_op_goto_tb:
2373 if (s->tb_jmp_insn_offset) {
2375 if (TCG_TARGET_REG_BITS == 64) {
2376 /* Ensure the next insns are 8-byte aligned. */
2377 if ((uintptr_t)s->code_ptr & 7) {
2380 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2381 tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2382 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2384 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2386 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
2390 /* Indirect jump. */
2391 tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
2392 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
2393 (intptr_t)(s->tb_jmp_target_addr + args[0]));
2395 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2396 tcg_out32(s, BCCTR | BO_ALWAYS);
2397 set_jmp_reset_offset(s, args[0]);
2399 /* For the unlinked case, need to reset TCG_REG_TB. */
2400 c = -tcg_current_code_size(s);
2401 assert(c == (int16_t)c);
2402 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
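    /*
     * For goto_ptr above, R3 is zeroed before the bctr: if the computed
     * target turns out to be the epilogue, the zero becomes the return
     * value of tcg_qemu_tb_exec(), signalling that no linked TB was found.
     */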
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
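
    /*
     * For the integer loads and stores below, tcg_out_mem_long() emits
     * the first (D-form, immediate displacement) opcode when the offset
     * fits, and falls back to the second (X-form, register-indexed)
     * opcode otherwise.
     */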
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;
    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
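    /*
     * For sar_i64 below: SRADI is an XS-form opcode whose 6-bit shift
     * count is split across the instruction, sh[0:4] in the regular SH
     * field and sh[5] in its own field one word-bit higher, hence the
     * extra masking and shifting.
     */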
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;
    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;
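    /*
     * Worked example for the register case above: depositing a 16-bit
     * field at bit 8 (args[3] = 8, args[4] = 16) emits rlwimi with a
     * rotate of 8 and mask bits MB = 32 - 8 - 16 = 8, ME = 31 - 8 = 23,
     * i.e. the rotated source is inserted under the mask 0x00ffff00.
     */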
    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit. */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
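    /*
     * The sub2 case below mirrors add2, but note PowerPC's reversed
     * subtract operand order: SUBFC computes RB - RA, so the low-part
     * minuend appears as the third operand of TAB().
     */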
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
}
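
/*
 * Return-value convention for tcg_can_emit_vec_op() below: 1 means the
 * opcode is supported directly for the given element size, 0 means not
 * supported at all, and -1 means it can be emulated, in which case the
 * middle-end calls back into tcg_expand_vec_op().
 */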
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return (vece <= MO_32 || have_isa_2_07) ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00. */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup. */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
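
/*
 * For the memory splats below, note the element-index fixup on
 * little-endian hosts: the loaded value lands at the opposite end of
 * the vector register, so the index handed to VSPLT* is mirrored
 * (elt ^= 15 for bytes, 7 for halfwords, 3 for words).
 */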
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#ifndef HOST_WORDS_BIGENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;

    case INDEX_op_cmp_vec:
        tcg_debug_assert(const_args[3]);
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0 = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0 = HLHL */
        return;
    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;
    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;
    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);

    /* Splat w/bytes for xxspltib. */
    tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
    tcg_temp_free_vec(t1);
}
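
/*
 * The byte splat above suffices for any element size because the Altivec
 * variable shift and rotate instructions use only the low log2(element
 * bits) bits of each count element, and the count has already been
 * masked accordingly.
 */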
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
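
/*
 * expand_vec_mul() below builds a full multiply out of the pieces the
 * ISA does provide.  For MO_8/MO_16, vmuleu/vmulou produce double-width
 * products of the even and odd elements; merging the high and low halves
 * and packing with vpku*um keeps just the low half of every product, in
 * element order.  For MO_32 without ISA 2.07, the 32x32 low product is
 * assembled as mulou(lo16) plus the msum of the cross partial products
 * shifted left 16.
 */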
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3, t4;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        t3 = tcg_temp_new_vec(type);
        t4 = tcg_temp_new_vec(type);
        tcg_gen_dupi_vec(MO_8, t4, -16);
        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(t4));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_dupi_vec(MO_8, t3, 0);
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
                  tcgv_vec_arg(t3), tcgv_vec_arg(t4));
        tcg_gen_add_vec(MO_32, v0, t2, t3);
        tcg_temp_free_vec(t3);
        tcg_temp_free_vec(t4);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        /* Rotate right is expanded as rotate left by the negated count. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
    static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
    static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
    static const TCGTargetOpDef r_rI_ri
        = { .args_ct_str = { "r", "rI", "ri" } };
    static const TCGTargetOpDef r_rI_rT
        = { .args_ct_str = { "r", "rI", "rT" } };
    static const TCGTargetOpDef r_r_rZW
        = { .args_ct_str = { "r", "r", "rZW" } };
    static const TCGTargetOpDef L_L_L_L
        = { .args_ct_str = { "L", "L", "L", "L" } };
    static const TCGTargetOpDef S_S_S_S
        = { .args_ct_str = { "S", "S", "S", "S" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "ri", "ri" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
    static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
    static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } };
    static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
    static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
    static const TCGTargetOpDef v_v_v_v
        = { .args_ct_str = { "v", "v", "v", "v" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return &r_r;
    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return &r_r_ri;
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return &r_r_rI;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return &r_r_r;
    case INDEX_op_sub_i32:
        return &r_rI_ri;
    case INDEX_op_add_i64:
        return &r_r_rT;
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rU;
    case INDEX_op_sub_i64:
        return &r_rI_rT;
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return &r_r_rZW;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_ri;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return &movc;
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return &dep;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? &r_L : &r_L_L);
    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? &S_S : &S_S_S);
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &r_L
                : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &S_S
                : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return &v_v_v;
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return &v_v;
    case INDEX_op_dup_vec:
        return have_isa_3_00 ? &v_vr : &v_v;
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dupm_vec:
        return &v_r;
    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return &v_v_v_v;

    default:
        return NULL;
    }
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),      /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                   /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,       /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */
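
/*
 * Keeping the two passes below separate matters: dcbst pushes modified
 * data cache lines to memory, the first sync makes those stores visible,
 * icbi then discards any stale instruction cache lines, and the final
 * sync/isync pair ensures no previously fetched instructions survive.
 */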
void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p, start1, stop1;
    size_t dsize = qemu_dcache_linesize;
    size_t isize = qemu_icache_linesize;

    start1 = start & ~(dsize - 1);
    stop1 = (stop + dsize - 1) & ~(dsize - 1);
    for (p = start1; p < stop1; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");

    start1 = start & ~(isize - 1);
    stop1 = (stop + isize - 1) & ~(isize - 1);
    for (p = start1; p < stop1; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}