/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-pool.inc.c"
#if defined _CALL_DARWIN || defined __APPLE__
#define TCG_TARGET_CALL_DARWIN
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS   1
#endif
/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif
#define TCG_VEC_TMP1    TCG_REG_V0
#define TCG_VEC_TMP2    TCG_REG_V1

#define TCG_REG_TB     TCG_REG_R31
#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64)

/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)

#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ  0x4000
static tcg_insn_unit *tb_ret_addr;

TCGPowerISA have_isa;
static bool have_isel;
bool have_altivec;
bool have_vsx;
#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG 30
#endif
#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R11,
    TCG_REG_R2,
    TCG_REG_R13,
    TCG_REG_R10,  /* call clobbered, arguments */
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,

    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
    TCG_REG_V2,   /* call clobbered, vectors */
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V8,
    TCG_REG_V9,
    TCG_REG_V10,
    TCG_REG_V11,
    TCG_REG_V12,
    TCG_REG_V13,
    TCG_REG_V14,
    TCG_REG_V15,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
};
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4
};
static const int tcg_target_callee_save_regs[] = {
#ifdef TCG_TARGET_CALL_DARWIN
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27, /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}
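/*
 * Illustrative check of the helper above: an I-form branch displacement is
 * a signed 26-bit byte offset (with the low two bits zero), so targets
 * within roughly +/- 32MB of the branch are reachable.  For example,
 * in_range_b(0x1fffffc) is true, while in_range_b(0x2000000) is false
 * because bit 25 is the sign bit of the field.
 */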
static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(in_range_b(disp));
    return disp & 0x3fffffc;
}
static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    if (in_range_b(disp)) {
        *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
        return true;
    }
    return false;
}
static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}
static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    if (disp == (int16_t) disp) {
        *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
        return true;
    }
    return false;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'v':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff00000000ull;
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'W':
        ct->ct |= TCG_CT_CONST_WSZ;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    } else if ((ct & TCG_CT_CONST_WSZ)
               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))
#define VX4(opc)  (OPCD(4)|(opc))

#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define LWZUX  XO31( 55)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LDU    XO58(  1)
#define LDUX   XO31( 53)
#define LWA    XO58(  2)
#define LWAX   XO31(341)
#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)
#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)
#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR     XO31(444)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define CNTTZW XO31(538)
#define CNTTZD XO31(570)
#define CNTPOPW XO31(378)
#define CNTPOPD XO31(506)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)
#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define EIEIO  XO31(854)
#define HWSYNC XO31(598)
#define LWSYNC (HWSYNC | (1u << 21))
#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define TW     XO31( 4)
#define TRAP   (TW | TO(31))

#define NOP    ORI  /* ori 0,0,0 */
#define LVX        XO31(103)
#define LVEBX      XO31(7)
#define LVEHX      XO31(39)
#define LVEWX      XO31(71)
#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
#define LXSD       (OPCD(57) | 2)   /* v3.00 */
#define LXVWSX     (XO31(364) | 1)  /* v3.00, force tx=1 */

#define STVX       XO31(231)
#define STVEWX     XO31(199)
#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
#define STXV       (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
#define STXSD      (OPCD(61) | 2)   /* v3.00 */
#define VADDSBS    VX4(768)
#define VADDUBS    VX4(512)
#define VADDUBM    VX4(0)
#define VADDSHS    VX4(832)
#define VADDUHS    VX4(576)
#define VADDUHM    VX4(64)
#define VADDSWS    VX4(896)
#define VADDUWS    VX4(640)
#define VADDUWM    VX4(128)
#define VADDUDM    VX4(192)       /* v2.07 */

#define VSUBSBS    VX4(1792)
#define VSUBUBS    VX4(1536)
#define VSUBUBM    VX4(1024)
#define VSUBSHS    VX4(1856)
#define VSUBUHS    VX4(1600)
#define VSUBUHM    VX4(1088)
#define VSUBSWS    VX4(1920)
#define VSUBUWS    VX4(1664)
#define VSUBUWM    VX4(1152)
#define VSUBUDM    VX4(1216)      /* v2.07 */

#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */

#define VMAXSB     VX4(258)
#define VMAXSH     VX4(322)
#define VMAXSW     VX4(386)
#define VMAXSD     VX4(450)       /* v2.07 */
#define VMAXUB     VX4(2)
#define VMAXUH     VX4(66)
#define VMAXUW     VX4(130)
#define VMAXUD     VX4(194)       /* v2.07 */
#define VMINSB     VX4(770)
#define VMINSH     VX4(834)
#define VMINSW     VX4(898)
#define VMINSD     VX4(962)       /* v2.07 */
#define VMINUB     VX4(514)
#define VMINUH     VX4(578)
#define VMINUW     VX4(642)
#define VMINUD     VX4(706)       /* v2.07 */

#define VCMPEQUB   VX4(6)
#define VCMPEQUH   VX4(70)
#define VCMPEQUW   VX4(134)
#define VCMPEQUD   VX4(199)       /* v2.07 */
#define VCMPGTSB   VX4(774)
#define VCMPGTSH   VX4(838)
#define VCMPGTSW   VX4(902)
#define VCMPGTSD   VX4(967)       /* v2.07 */
#define VCMPGTUB   VX4(518)
#define VCMPGTUH   VX4(582)
#define VCMPGTUW   VX4(646)
#define VCMPGTUD   VX4(711)       /* v2.07 */
#define VCMPNEB    VX4(7)         /* v3.00 */
#define VCMPNEH    VX4(71)        /* v3.00 */
#define VCMPNEW    VX4(135)       /* v3.00 */
#define VSLB       VX4(260)
#define VSLH       VX4(324)
#define VSLW       VX4(388)
#define VSLD       VX4(1476)      /* v2.07 */
#define VSRB       VX4(516)
#define VSRH       VX4(580)
#define VSRW       VX4(644)
#define VSRD       VX4(1732)      /* v2.07 */
#define VSRAB      VX4(772)
#define VSRAH      VX4(836)
#define VSRAW      VX4(900)
#define VSRAD      VX4(964)       /* v2.07 */
#define VRLW       VX4(132)
#define VRLD       VX4(196)       /* v2.07 */

#define VMULEUB    VX4(520)
#define VMULEUH    VX4(584)
#define VMULEUW    VX4(648)       /* v2.07 */
#define VMULOUB    VX4(8)
#define VMULOUH    VX4(72)
#define VMULOUW    VX4(136)       /* v2.07 */
#define VMULUWM    VX4(137)       /* v2.07 */
#define VMSUMUHM   VX4(38)

#define VMRGHB     VX4(12)
#define VMRGHH     VX4(76)
#define VMRGHW     VX4(140)
#define VMRGLB     VX4(268)
#define VMRGLH     VX4(332)
#define VMRGLW     VX4(396)

#define VPKUHUM    VX4(14)
#define VPKUWUM    VX4(78)

#define VAND       VX4(1028)
#define VANDC      VX4(1092)
#define VNOR       VX4(1284)
#define VOR        VX4(1156)
#define VXOR       VX4(1220)
#define VEQV       VX4(1668)      /* v2.07 */
#define VNAND      VX4(1412)      /* v2.07 */
#define VORC       VX4(1348)      /* v2.07 */

#define VSPLTB     VX4(524)
#define VSPLTH     VX4(588)
#define VSPLTW     VX4(652)
#define VSPLTISB   VX4(780)
#define VSPLTISH   VX4(844)
#define VSPLTISW   VX4(908)

#define VSLDOI     VX4(44)

#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */

#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */
#define RT(r)   ((r)<<21)
#define RS(r)   ((r)<<21)
#define RA(r)   ((r)<<16)
#define RB(r)   ((r)<<11)
#define TO(t)   ((t)<<21)
#define SH(s)   ((s)<<11)
#define MB(b)   ((b)<<6)
#define ME(e)   ((e)<<1)
#define BO(o)   ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b)  (1 << (19 - (b)))

#define VRT(r)  (((r) & 31) << 21)
#define VRA(r)  (((r) & 31) << 16)
#define VRB(r)  (((r) & 31) << 11)
#define VRC(r)  (((r) & 31) <<  6)

#define LK    1
#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)    ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)
#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};
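/*
 * Illustrative decoding of the branch encodings built below: the entry
 * BC | BI(7, CR_EQ) | BO_COND_TRUE assembles to "bc 12, 4*cr7+eq, target",
 * i.e. BO = 12 (branch if the CR bit is true) and BI = 7 * 4 + 2 = 30,
 * the EQ bit of CR field 7.
 */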
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};
/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_insn_unit *target;
    int16_t lo;
    int32_t hi;

    value += addend;
    target = (tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        return reloc_pc14(code_ptr, target);
    case R_PPC_REL24:
        return reloc_pc24(code_ptr, target);
    case R_PPC_ADDR16:
        /*
         * We are (slightly) abusing this relocation type.  In particular,
         * assert that the low 2 bits are zero, and do not modify them.
         * That way we can use this with LD et al that have opcode bits
         * in the low 2 bits of the insn.
         */
        if ((value & 3) || value != (int16_t)value) {
            return false;
        }
        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
        break;
    case R_PPC_ADDR32:
        /*
         * We are abusing this relocation type.  Again, this points to
         * a pair of insns, lis + load.  This is an absolute address
         * relocation for PPC32 so the lis cannot be removed.
         */
        lo = value;
        hi = value - lo;
        if (hi + lo != value) {
            return false;
        }
        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                tcg_out32(s, OR | SAB(arg, ret, arg));
                break;
            } else if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
                          | VRT(arg) | RA(ret));
                break;
            } else {
                /* Altivec does not support vector->integer moves.  */
                return false;
            }
        } else if (arg < TCG_REG_V0) {
            if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
                          | VRT(ret) | RA(arg));
                break;
            } else {
                /* Altivec does not support integer->vector moves.  */
                return false;
            }
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}
static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
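/*
 * Illustrative expansion of the shift helpers above: tcg_out_shli32 with
 * c = 5 emits "rlwinm dst, src, 5, 0, 26", the canonical encoding of
 * "slwi dst, src, 5" (rotate left 5, keep mask bits 0..26, clearing the
 * five bits rotated around).  Likewise tcg_out_shri64 with c = 8 emits
 * "rldicl dst, src, 56, 8", the canonical "srdi dst, src, 8".
 */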
/* Emit a move into ret of arg, if it can be done in one insn.  */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        return true;
    }
    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        return true;
    }
    return false;
}
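/*
 * For example (illustrative): arg = -5 fits in 16 bits and becomes
 * "addi ret, 0, -5" (li), while arg = 0x12340000 has a zero low half
 * and becomes "addis ret, 0, 0x1234" (lis).  Anything else falls
 * through to the multi-insn paths below.
 */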
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    intptr_t tb_diff;
    tcg_target_long tmp;
    int shift;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        arg = (int32_t)arg;
    }

    /* Load 16-bit immediates with one insn. */
    if (tcg_out_movi_one(s, ret, arg)) {
        return;
    }

    /* Load addresses within the TB with one insn. */
    tb_diff = arg - (intptr_t)s->code_gen_ptr;
    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
        return;
    }

    /* Load 32-bit immediates with two insns.  Note that we've already
       eliminated bare ADDIS, so we know both insns are required. */
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
        return;
    }
    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        return;
    }

    /* Load masked 16-bit value. */
    if (arg > 0 && (arg & 0x8000)) {
        tmp = arg | 0x7fff;
        if ((tmp & (tmp + 1)) == 0) {
            int mb = clz64(tmp + 1) + 1;
            tcg_out32(s, ADDI | TAI(ret, 0, arg));
            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
            return;
        }
    }

    /* Load common masks with 2 insns. */
    shift = ctz64(arg);
    tmp = arg >> shift;
    if (tmp == (int16_t)tmp) {
        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
        tcg_out_shli64(s, ret, ret, shift);
        return;
    }
    shift = clz64(arg);
    if (tcg_out_movi_one(s, ret, arg << shift)) {
        tcg_out_shri64(s, ret, ret, shift);
        return;
    }

    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
                       -(intptr_t)s->code_gen_ptr);
        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
        return;
    }

    tmp = arg >> 31 >> 1;
    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
    if (tmp) {
        tcg_out_shli64(s, ret, ret, 32);
    }
    if (arg & 0xffff0000) {
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    }
    if (arg & 0xffff) {
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    }
}
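/*
 * Worked example (illustrative) of the final fallback above, for the
 * 64-bit constant 0x0123456789abcdef: the high 32 bits 0x01234567 are
 * built with lis+ori, shifted up with "sldi ret, ret, 32", and the low
 * half merged with "oris ret, ret, 0x89ab" and "ori ret, ret, 0xcdef",
 * five insns in all.
 */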
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long val)
{
    uint32_t load_insn;
    int rel, low;
    intptr_t add;

    low = (int8_t)val;
    if (low >= -16 && low < 16) {
        if (val == (tcg_target_long)dup_const(MO_8, low)) {
            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
            return;
        }
        if (val == (tcg_target_long)dup_const(MO_16, low)) {
            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
            return;
        }
        if (val == (tcg_target_long)dup_const(MO_32, low)) {
            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
            return;
        }
    }
    if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) {
        tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
        return;
    }

    /*
     * Otherwise we must load the value from the constant pool.
     */
    if (USE_REG_TB) {
        rel = R_PPC_ADDR16;
        add = -(intptr_t)s->code_gen_ptr;
    } else {
        rel = R_PPC_ADDR32;
        add = 0;
    }

    if (have_vsx) {
        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, val, rel, s->code_ptr, add);
        } else {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
        }
    } else {
        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
        } else {
            new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
        }
    }

    if (USE_REG_TB) {
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
        load_insn |= RA(TCG_REG_TB);
    } else {
        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    }
    tcg_out32(s, load_insn);
}
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(ret < TCG_REG_V0);
        tcg_out_movi_int(s, type, ret, arg, false);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_out_dupi_vec(s, type, ret, arg);
        break;
    default:
        g_assert_not_reached();
    }
}
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}
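/*
 * Worked example (illustrative): c = 0x00ffff00.  lsb = 0x100, and
 * c + lsb = 0x01000000 is a power of 2, so the pattern is accepted
 * with *mb = 8 and *me = 23 -- exactly the rlwinm mask selecting
 * big-endian bits 8..23, i.e. 0x00ffff00.
 */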
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = 63 - ctz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
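/*
 * For example (illustrative): c = 0x0000ff00 is a contiguous mask, so a
 * single "rlwinm dst, src, 0, 16, 23" suffices; c = 0x00000fff uses
 * andi.; and a scattered value such as 0x00ff00ff is first built in R0
 * and then applied with "and".
 */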
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}
static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
            rs = rt;
            break;
        }
        break;
    case LXSD:
    case STXSD:
        align = 3;
        break;
    case LXV:
    case STXV:
        align = 15;
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_int_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
    }
}
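/*
 * Worked example (illustrative): a load with offset 0x12345678 does not
 * fit the 16-bit D field, so the code above splits it as l0 = 0x5678 and
 * l1 = 0x1234 and emits "addis tmp, base, 0x1234" followed by the D-form
 * load with displacement 0x5678.  Truly huge or misaligned offsets fall
 * back to movi plus the X-form (indexed) opcode instead.
 */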
static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
{
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
                             ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
        if (offset & 8) {
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        }
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
                         LVX, ret, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        break;
    case TCG_TYPE_I64:
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(arg >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
                             STXSDX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        if (offset & 8) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(arg >= TCG_REG_V0);
        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
                         STVX, arg, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
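/*
 * For example (illustrative): tcg_out_cmp(s, TCG_COND_EQ, r3, 10, true,
 * 7, TCG_TYPE_I32) emits the single insn "cmpwi cr7, r3, 10", while a
 * constant that fits neither immediate form is first materialized in R0
 * and compared with the register form.
 */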
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}
*s
, TCGReg dst
, TCGReg src
)
1402 /* X != 0 implies X + -1 generates a carry. Extra addition
1403 trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
1405 tcg_out32(s
, ADDIC
| TAI(dst
, src
, -1));
1406 tcg_out32(s
, SUBFE
| TAB(dst
, dst
, src
));
1408 tcg_out32(s
, ADDIC
| TAI(TCG_REG_R0
, src
, -1));
1409 tcg_out32(s
, SUBFE
| TAB(dst
, TCG_REG_R0
, src
));
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Ignore high bits of a potential constant arg2. */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_shri32(s, arg0, arg1, 31);
            } else {
                tcg_out_shri64(s, arg0, arg1, 63);
            }
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (have_isel) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
{
    if (l->has_value) {
        bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
    }
    tcg_out32(s, bc);
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], l);
}
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (have_isel) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
        tcg_out32(s, opc | RA(a0) | RS(a1));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
        /* Note that the only other valid constant for a2 is 0.  */
        if (have_isel) {
            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
        } else if (!const_a2 && a0 == a2) {
            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
            tcg_out32(s, opc | RA(a0) | RS(a1));
        } else {
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
            if (a2 == 0) {
                tcg_out_movi(s, type, a0, 0);
            } else {
                tcg_out_mov(s, type, a0, a2);
            }
        }
    }
}
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
{
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    };

    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    int op, bit1, bit2;

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];
    blconst = const_args[2];
    bhconst = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
        op = CRAND;
        goto do_equality;
    case TCG_COND_NE:
        op = CRNAND;
    do_equality:
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        break;

    case TCG_COND_LT:
    case TCG_COND_LE:
    case TCG_COND_GT:
    case TCG_COND_GE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        cond2 = tcg_unsigned_cond(cond);

        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
}
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
}
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    uint32_t insn = HWSYNC;
    a0 &= TCG_MO_ALL;
    if (a0 == TCG_MO_LD_LD) {
        insn = LWSYNC;
    } else if (a0 == TCG_MO_ST_ST) {
        insn = EIEIO;
    }
    tcg_out32(s, insn);
}
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
                              uintptr_t addr)
{
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_insn_unit i1, i2;
        intptr_t tb_diff = addr - tc_ptr;
        intptr_t br_diff = addr - (jmp_addr + 4);
        uint64_t pair;

        /* This does not exercise the range of the branch, but we do
           still need to be able to load the new value of TCG_REG_TB.
           But this does still happen quite often.  */
        if (tb_diff == (int16_t)tb_diff) {
            i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
            i2 = B | (br_diff & 0x3fffffc);
        } else {
            intptr_t lo = (int16_t)tb_diff;
            intptr_t hi = (int32_t)(tb_diff - lo);
            assert(tb_diff == hi + lo);
            i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
            i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
        }
#ifdef HOST_WORDS_BIGENDIAN
        pair = (uint64_t)i1 << 32 | i2;
#else
        pair = (uint64_t)i2 << 32 | i1;
#endif

        /* As per the enclosing if, this is ppc64.  Avoid the _Static_assert
           within atomic_set that would fail to build a ppc32 host.  */
        atomic_set__nocheck((uint64_t *)jmp_addr, pair);
        flush_icache_range(jmp_addr, jmp_addr + 8);
    } else {
        intptr_t diff = addr - jmp_addr;
        tcg_debug_assert(in_range_b(diff));
        atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
        flush_icache_range(jmp_addr, jmp_addr + 4);
    }
}
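/*
 * Worked example (illustrative): on ppc64 with tb_diff = 0x1234 the pair
 * becomes "addi rTB, rTB, 0x1234; b <target>", written as one aligned
 * 8-byte atomic store so a concurrently executing thread observes either
 * the old or the new pair, never a mix of the two.
 */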
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
#ifdef _CALL_AIX
    /* Look through the descriptor.  If the branch is in range, and we
       don't have to spend too much effort on building the toc.  */
    void *tgt = ((void **)target)[0];
    uintptr_t toc = ((uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, LK, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    intptr_t diff;

    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address.  */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);

    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, LK, target);
    } else {
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#else
    tcg_out_b(s, LK, target);
#endif
}
static const uint32_t qemu_ldx_opc[16] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_Q]  = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_Q]  = LDBRX,
};
static const uint32_t qemu_stx_opc[16] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_Q]  = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_Q]  = STDBRX,
};
static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};
#if defined (CONFIG_SOFTMMU)
#include "../tcg-ldst.inc.c"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We expect to use a 16-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */

static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
                               TCGReg addrlo, TCGReg addrhi,
                               int mem_index, bool is_read)
{
    int cmp_off = (is_read
                   ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    }
    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));

    /* Load the TLB comparator.  */
    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
                        ? LWZUX : LDUX);
        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
    } else {
        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
        } else {
            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
        }
    }

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
               offsetof(CPUTLBEntry, addend));

    /* Clear the non-page, non-alignment bits from the address */
    if (TCG_TARGET_REG_BITS == 32) {
        /* We don't support unaligned accesses on 32-bits.
         * Preserve the bottom bits and thus trigger a comparison
         * failure on unaligned accesses.
         */
        if (a_bits < s_bits) {
            a_bits = s_bits;
        }
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else {
        TCGReg t = addrlo;

        /* If the access is unaligned, we need to make sure we fail if we
         * cross a page boundary.  The trick is to add the access size-1
         * to the address before masking the low bits.  That will make the
         * address overflow to the next page if we cross a page boundary,
         * which will then force a mismatch of the TLB compare.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1 << a_bits) - 1;
            unsigned s_mask = (1 << s_bits) - 1;
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
            t = TCG_REG_R0;
        }

        /* Mask the address for the requested alignment.  */
        if (TARGET_LONG_BITS == 32) {
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
            /* Zero-extend the address for use in the final address.  */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else if (a_bits == 0) {
            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
        } else {
            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
        }
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_TL);
    }

    return addrlo;
}
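/*
 * Illustrative shape of the fast path emitted above for a 64-bit guest
 * on a 64-bit host with an aligned access (generic, non-ldux case):
 *     ld    r3, mask_off(env)    ;  ld r4, table_off(env)
 *     srdi  tmp1, addrlo, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
 *     and   r3, r3, tmp1         ;  index into the TLB
 *     add   r3, r3, r4           ;  ld tmp1, cmp_off(r3)
 *     ld    r3, addend_off(r3)   ;  rldicr r0, addrlo, 0, 63-PAGE_BITS
 *     cmpd  cr7, r0, tmp1        ;  CR7.EQ set on a TLB hit
 */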
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo_reg, TCGReg datahi_reg,
                                TCGReg addrlo_reg, TCGReg addrhi_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo_reg;
    label->datahi_reg = datahi_reg;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->raddr = raddr;
    label->label_ptr[0] = lptr;
}
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        arg |= 1;
#endif
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
    } else if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        arg |= 1;
#endif
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32) {
        switch (s_bits) {
        case MO_64:
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            arg |= 1;
#endif
            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
            /* FALLTHRU */
        case MO_32:
            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
            break;
        default:
            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
            break;
        }
    } else {
        if (s_bits == MO_64) {
            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
        } else {
            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
        }
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
#endif /* SOFTMMU */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
        } else if (addrlo == datahi) {
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
        } else {
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
        if (!have_isa_2_06 && insn == LDBRX) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
        } else if (insn) {
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
        } else {
            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_exts_opc[s_bits];
            tcg_out32(s, insn | RA(datalo) | RS(datalo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
        } else {
            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
        if (!have_isa_2_06 && insn == STDBRX) {
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
        } else {
            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}
/* Parameters for function call generation, used in tcg.c.  */
#define TCG_TARGET_STACK_ALIGN       16
#define TCG_TARGET_EXTEND_ARGS       1

#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(TCG_TARGET_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled abi"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
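/*
 * Worked example (illustrative, and assuming the generic values
 * TCG_STATIC_CALL_ARGS_SIZE == 128 and CPU_TEMP_BUF_NLONGS == 128 from
 * tcg.c / tcg.h): on a 64-bit ELFv2 host, LINK_AREA_SIZE is 32,
 * CPU_TEMP_BUF_SIZE is 1024 and REG_SAVE_SIZE is 18 * 8 = 144, so
 * FRAME_SIZE rounds (32 + 128 + 1024 + 144) up to 1328 bytes.
 */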
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    /* First emit adhoc function descriptor */
    void **desc = (void **)s->code_ptr;
    desc[0] = desc + 2;                   /* entry point */
    desc[1] = 0;                          /* environment pointer */
    s->code_ptr = (void *)(desc + 2);     /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + LR_OFFSET);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    s->code_gen_epilogue = tb_ret_addr = s->code_ptr;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
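
/*
 * Sketch of the frame laid out above (derived from the macros; the
 * exact offsets vary per ABI, so treat this as illustrative):
 *
 *   r1 + FRAME_SIZE + LR_OFFSET              saved LR (caller's frame)
 *   r1 + REG_SAVE_BOT .. FRAME_SIZE          callee-saved r14..r31
 *   r1 + REG_SAVE_BOT - CPU_TEMP_BUF_SIZE    TCG temp buffer
 *   r1 + TCG_TARGET_CALL_STACK_OFFSET        outgoing call arguments
 *   r1 + 0                                   back chain / link area
 */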
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8-byte aligned. */
                if ((uintptr_t)s->code_ptr & 7) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB.  */
            c = -tcg_current_code_size(s);
            assert(c == (int16_t)c);
            tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
        }
        break;
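    /*
     * Illustrative note (values assumed, not from the source): the
     * ADDIS/ADDI pair above is a patchable "TB += disp" sequence.
     * When the TB is later linked, the two zero immediates are
     * rewritten so that
     *     addis tb, tb, hi ; addi tb, tb, lo
     * computes tb += (hi << 16) + (int16_t)lo; e.g. disp = 0x12348000
     * patches to hi = 0x1235, lo = -0x8000.
     */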
    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;
    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
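    /*
     * Illustrative encoding note (editorial, not from the source):
     * SRADI's 6-bit shift count is split, with the low five bits in
     * the normal SH field and the sixth bit OR'ed in separately,
     * hence sh = SH(n & 0x1f) | (((n >> 5) & 1) << 1).  E.g.
     * n = 35 = 0b100011 yields SH(3) plus the extra high bit.
     */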
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;

    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;
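    /*
     * Worked example (illustrative values): for a constant-zero
     * deposit of len = args[4] bits at pos = args[3], the mask
     * ((2u << (len - 1)) - 1) << pos selects the field; len = 8,
     * pos = 16 gives ((2u << 7) - 1) << 16 = 0xff << 16 = 0x00ff0000,
     * and the andi clears exactly those bits.
     */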
    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
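    /*
     * Illustrative example (values assumed): a double-word add
     * computes the low half with ADDC (setting CA) and the high half
     * with ADDE (consuming CA).  E.g. on a 64-bit host,
     * 0x1:0xffffffffffffffff + 0x0:0x1 gives ADDC lo = 0 with CA = 1,
     * then ADDE hi = 0x1 + 0x0 + 1 = 0x2.
     */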
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00.  */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup.  */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
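
/*
 * Illustrative note (editorial, not part of the source): the splat
 * indexes above (7 << 16 for bytes, 3 << 16 for halfwords, 1 << 16
 * for words) select the element holding the integer within the left
 * doubleword.  E.g. a 32-bit value loaded per the VSX convention sits
 * in word element 1, so VSPLTW with UIM = 1 broadcasts it to all lanes.
 */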
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#ifndef HOST_WORDS_BIGENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        tcg_debug_assert(vece == MO_32 && have_isa_2_07);
        insn = VMULUWM;
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx  */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;
    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);

    /* Splat w/bytes for xxspltib.  */
    tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
    tcg_temp_free_vec(t1);
}
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
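
/*
 * Worked example (illustrative): the hardware only provides EQ, GT
 * and GTU compares, so e.g. x < y is rewritten by swapping operands
 * into y > x, and x >= y becomes !(y > x), i.e. a swap plus a
 * trailing NOT of the result mask.
 */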
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3, t4;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        t3 = tcg_temp_new_vec(type);
        t4 = tcg_temp_new_vec(type);
        tcg_gen_dupi_vec(MO_8, t4, -16);
        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(t4));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_dupi_vec(MO_8, t3, 0);
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
                  tcgv_vec_arg(t3), tcgv_vec_arg(t4));
        tcg_gen_add_vec(MO_32, v0, t2, t3);
        tcg_temp_free_vec(t3);
        tcg_temp_free_vec(t4);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
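
/*
 * Sketch of the MO_32 path above (editorial, illustrative): with each
 * 32-bit lane split as a = ah * 2^16 + al and b = bh * 2^16 + bl,
 *     a * b = al * bl + ((al * bh + ah * bl) << 16)   (mod 2^32).
 * The mulou supplies al * bl, the msum against a half-rotated b sums
 * the two cross products, and the final shift and add combine them.
 */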
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
    static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
    static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
    static const TCGTargetOpDef r_rI_ri
        = { .args_ct_str = { "r", "rI", "ri" } };
    static const TCGTargetOpDef r_rI_rT
        = { .args_ct_str = { "r", "rI", "rT" } };
    static const TCGTargetOpDef r_r_rZW
        = { .args_ct_str = { "r", "r", "rZW" } };
    static const TCGTargetOpDef L_L_L_L
        = { .args_ct_str = { "L", "L", "L", "L" } };
    static const TCGTargetOpDef S_S_S_S
        = { .args_ct_str = { "S", "S", "S", "S" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "ri", "ri" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
    static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
    static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } };
    static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
    static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
    static const TCGTargetOpDef v_v_v_v
        = { .args_ct_str = { "v", "v", "v", "v" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return &r_r_ri;
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return &r_r_rI;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return &r_r_r;
    case INDEX_op_sub_i32:
        return &r_rI_ri;
    case INDEX_op_add_i64:
        return &r_r_rT;
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rU;
    case INDEX_op_sub_i64:
        return &r_rI_rT;
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return &r_r_rZW;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_ri;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return &movc;
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return &dep;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return &sub2;

    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? &r_L : &r_L_L);
    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? &S_S : &S_S_S);
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &r_L
                : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &S_S
                : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return &v_v_v;
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return &v_v;
    case INDEX_op_dup_vec:
        return have_isa_3_00 ? &v_vr : &v_v;
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dupm_vec:
        return &v_r;
    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return &v_v_v_v;

    default:
        return NULL;
    }
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}

#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */
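
/*
 * Illustrative decoding (editorial, values assumed): with SZR = 8 the
 * CIE data alignment factor is -8, so an fde_reg_ofs entry
 * p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR reads as
 * "register saved at CFA - p[1] * 8".  For i = 0 this is the lowest
 * callee-saved slot, REG_SAVE_BOT bytes above the new stack pointer.
 */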

void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p, start1, stop1;
    size_t dsize = qemu_dcache_linesize;
    size_t isize = qemu_icache_linesize;

    start1 = start & ~(dsize - 1);
    stop1 = (stop + dsize - 1) & ~(dsize - 1);
    for (p = start1; p < stop1; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");

    start1 = start & ~(isize - 1);
    stop1 = (stop + isize - 1) & ~(isize - 1);
    for (p = start1; p < stop1; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
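
/*
 * Background note (general PowerPC practice, editorial): dcbst pushes
 * modified data-cache lines to memory, sync waits for those stores,
 * icbi invalidates the now-stale instruction-cache lines, and the
 * final sync/isync pair ensures no already-fetched instruction from
 * the old code survives before execution continues.
 */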