2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "../tcg-pool.c.inc"
28 #if defined _CALL_DARWIN || defined __APPLE__
29 #define TCG_TARGET_CALL_DARWIN
32 # define TCG_TARGET_CALL_ALIGN_ARGS 1
35 /* For some memory operations, we need a scratch that isn't R0. For the AIX
36 calling convention, we can re-use the TOC register since we'll be reloading
37 it at every call. Otherwise R12 will do nicely as neither a call-saved
38 register nor a parameter register. */
40 # define TCG_REG_TMP1 TCG_REG_R2
42 # define TCG_REG_TMP1 TCG_REG_R12
45 #define TCG_VEC_TMP1 TCG_REG_V0
46 #define TCG_VEC_TMP2 TCG_REG_V1
48 #define TCG_REG_TB TCG_REG_R31
49 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
51 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
52 #define SZP ((int)sizeof(void *))
54 /* Shorthand for size of a register. */
55 #define SZR (TCG_TARGET_REG_BITS / 8)
57 #define TCG_CT_CONST_S16 0x100
58 #define TCG_CT_CONST_U16 0x200
59 #define TCG_CT_CONST_S32 0x400
60 #define TCG_CT_CONST_U32 0x800
61 #define TCG_CT_CONST_ZERO 0x1000
62 #define TCG_CT_CONST_MONE 0x2000
63 #define TCG_CT_CONST_WSZ 0x4000
65 #define ALL_GENERAL_REGS 0xffffffffu
66 #define ALL_VECTOR_REGS 0xffffffff00000000ull
69 #define ALL_QLOAD_REGS \
71 ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
72 #define ALL_QSTORE_REGS \
73 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
74 (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
76 #define ALL_QLOAD_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
77 #define ALL_QSTORE_REGS ALL_QLOAD_REGS
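/*
 * These exclusions presumably track the fixed registers consumed on the
 * softmmu path: tcg_out_tlb_read below clobbers R0/R2 and produces its
 * results in R3/R4, and R5/R6 carry further slow-path helper arguments,
 * so the allocator must not hand them out for qemu_ld/qemu_st operands.
 */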
81 static bool have_isel;
85 #ifndef CONFIG_SOFTMMU
86 #define TCG_GUEST_BASE_REG 30
89 #ifdef CONFIG_DEBUG_TCG
90 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
91 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
92 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
93 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
94 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
95 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
96 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
97 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
98 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
102 static const int tcg_target_reg_alloc_order[] = {
103 TCG_REG_R14, /* call saved registers */
121 TCG_REG_R12, /* call clobbered, non-arguments */
125 TCG_REG_R10, /* call clobbered, arguments */
134 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
135 TCG_REG_V2, /* call clobbered, vectors */
155 static const int tcg_target_call_iarg_regs[] = {
166 static const int tcg_target_call_oarg_regs[] = {
171 static const int tcg_target_callee_save_regs[] = {
172 #ifdef TCG_TARGET_CALL_DARWIN
188 TCG_REG_R27, /* currently used for the global env */
195 static inline bool in_range_b(tcg_target_long target)
197 return target == sextract64(target, 0, 26);
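/*
 * The I-form branch packs a 24-bit LI field that is concatenated with
 * two zero bits, giving a signed 26-bit byte displacement (+/- 32 MiB);
 * surviving sign-extraction from the low 26 bits is exactly that test.
 */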
200 static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
201 const tcg_insn_unit *target)
203 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
204 tcg_debug_assert(in_range_b(disp));
205 return disp & 0x3fffffc;
208 static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
210 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
211 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
213 if (in_range_b(disp)) {
214 *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
220 static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
221 const tcg_insn_unit *target)
223 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
224 tcg_debug_assert(disp == (int16_t) disp);
225 return disp & 0xfffc;
228 static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
230 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
231 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
233 if (disp == (int16_t) disp) {
234 *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
240 /* test if a constant matches the constraint */
241 static int tcg_target_const_match(tcg_target_long val, TCGType type,
242 const TCGArgConstraint *arg_ct)
245 if (ct & TCG_CT_CONST) {
249 /* The only 32-bit constraint we use aside from
250 TCG_CT_CONST is TCG_CT_CONST_S16. */
251 if (type == TCG_TYPE_I32) {
255 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
257 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
259 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
261 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
263 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
265 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
267 } else if ((ct & TCG_CT_CONST_WSZ)
268 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
274 #define OPCD(opc) ((opc)<<26)
275 #define XO19(opc) (OPCD(19)|((opc)<<1))
276 #define MD30(opc) (OPCD(30)|((opc)<<2))
277 #define MDS30(opc) (OPCD(30)|((opc)<<1))
278 #define XO31(opc) (OPCD(31)|((opc)<<1))
279 #define XO58(opc) (OPCD(58)|(opc))
280 #define XO62(opc) (OPCD(62)|(opc))
281 #define VX4(opc) (OPCD(4)|(opc))
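/*
 * Worked example of these encoding helpers: ADD below is XO31(266)
 * = (31 << 26) | (266 << 1) = 0x7c000214, and "add r3,r4,r5" assembles
 * as ADD | TAB(3, 4, 5) = 0x7c642a14 once the RT/RA/RB fields are or'ed in.
 */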
285 #define LBZ OPCD( 34)
286 #define LHZ OPCD( 40)
287 #define LHA OPCD( 42)
288 #define LWZ OPCD( 32)
289 #define LWZUX XO31( 55)
290 #define STB OPCD( 38)
291 #define STH OPCD( 44)
292 #define STW OPCD( 36)
295 #define STDU XO62( 1)
296 #define STDX XO31(149)
299 #define LDX XO31( 21)
301 #define LDUX XO31( 53)
303 #define LWAX XO31(341)
305 #define ADDIC OPCD( 12)
306 #define ADDI OPCD( 14)
307 #define ADDIS OPCD( 15)
308 #define ORI OPCD( 24)
309 #define ORIS OPCD( 25)
310 #define XORI OPCD( 26)
311 #define XORIS OPCD( 27)
312 #define ANDI OPCD( 28)
313 #define ANDIS OPCD( 29)
314 #define MULLI OPCD( 7)
315 #define CMPLI OPCD( 10)
316 #define CMPI OPCD( 11)
317 #define SUBFIC OPCD( 8)
319 #define LWZU OPCD( 33)
320 #define STWU OPCD( 37)
322 #define RLWIMI OPCD( 20)
323 #define RLWINM OPCD( 21)
324 #define RLWNM OPCD( 23)
326 #define RLDICL MD30( 0)
327 #define RLDICR MD30( 1)
328 #define RLDIMI MD30( 3)
329 #define RLDCL MDS30( 8)
331 #define BCLR XO19( 16)
332 #define BCCTR XO19(528)
333 #define CRAND XO19(257)
334 #define CRANDC XO19(129)
335 #define CRNAND XO19(225)
336 #define CROR XO19(449)
337 #define CRNOR XO19( 33)
339 #define EXTSB XO31(954)
340 #define EXTSH XO31(922)
341 #define EXTSW XO31(986)
342 #define ADD XO31(266)
343 #define ADDE XO31(138)
344 #define ADDME XO31(234)
345 #define ADDZE XO31(202)
346 #define ADDC XO31( 10)
347 #define AND XO31( 28)
348 #define SUBF XO31( 40)
349 #define SUBFC XO31( 8)
350 #define SUBFE XO31(136)
351 #define SUBFME XO31(232)
352 #define SUBFZE XO31(200)
354 #define XOR XO31(316)
355 #define MULLW XO31(235)
356 #define MULHW XO31( 75)
357 #define MULHWU XO31( 11)
358 #define DIVW XO31(491)
359 #define DIVWU XO31(459)
361 #define CMPL XO31( 32)
362 #define LHBRX XO31(790)
363 #define LWBRX XO31(534)
364 #define LDBRX XO31(532)
365 #define STHBRX XO31(918)
366 #define STWBRX XO31(662)
367 #define STDBRX XO31(660)
368 #define MFSPR XO31(339)
369 #define MTSPR XO31(467)
370 #define SRAWI XO31(824)
371 #define NEG XO31(104)
372 #define MFCR XO31( 19)
373 #define MFOCRF (MFCR | (1u << 20))
374 #define NOR XO31(124)
375 #define CNTLZW XO31( 26)
376 #define CNTLZD XO31( 58)
377 #define CNTTZW XO31(538)
378 #define CNTTZD XO31(570)
379 #define CNTPOPW XO31(378)
380 #define CNTPOPD XO31(506)
381 #define ANDC XO31( 60)
382 #define ORC XO31(412)
383 #define EQV XO31(284)
384 #define NAND XO31(476)
385 #define ISEL XO31( 15)
387 #define MULLD XO31(233)
388 #define MULHD XO31( 73)
389 #define MULHDU XO31( 9)
390 #define DIVD XO31(489)
391 #define DIVDU XO31(457)
393 #define LBZX XO31( 87)
394 #define LHZX XO31(279)
395 #define LHAX XO31(343)
396 #define LWZX XO31( 23)
397 #define STBX XO31(215)
398 #define STHX XO31(407)
399 #define STWX XO31(151)
401 #define EIEIO XO31(854)
402 #define HWSYNC XO31(598)
403 #define LWSYNC (HWSYNC | (1u << 21))
405 #define SPR(a, b) ((((a)<<5)|(b))<<11)
407 #define CTR SPR(9, 0)
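/* mtspr/mfspr encode the SPR number with its two 5-bit halves swapped,
   so SPR(a, b) takes the low half first: CTR is SPR 9, hence SPR(9, 0),
   and LR is SPR 8, i.e. SPR(8, 0). */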
409 #define SLW XO31( 24)
410 #define SRW XO31(536)
411 #define SRAW XO31(792)
413 #define SLD XO31( 27)
414 #define SRD XO31(539)
415 #define SRAD XO31(794)
416 #define SRADI XO31(413<<1)
419 #define TRAP (TW | TO(31))
421 #define NOP ORI /* ori 0,0,0 */
423 #define LVX XO31(103)
424 #define LVEBX XO31(7)
425 #define LVEHX XO31(39)
426 #define LVEWX XO31(71)
427 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
428 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
429 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
430 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
431 #define LXSD (OPCD(57) | 2) /* v3.00 */
432 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
434 #define STVX XO31(231)
435 #define STVEWX XO31(199)
436 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
437 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
438 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
439 #define STXSD (OPCD(61) | 2) /* v3.00 */
441 #define VADDSBS VX4(768)
442 #define VADDUBS VX4(512)
443 #define VADDUBM VX4(0)
444 #define VADDSHS VX4(832)
445 #define VADDUHS VX4(576)
446 #define VADDUHM VX4(64)
447 #define VADDSWS VX4(896)
448 #define VADDUWS VX4(640)
449 #define VADDUWM VX4(128)
450 #define VADDUDM VX4(192) /* v2.07 */
452 #define VSUBSBS VX4(1792)
453 #define VSUBUBS VX4(1536)
454 #define VSUBUBM VX4(1024)
455 #define VSUBSHS VX4(1856)
456 #define VSUBUHS VX4(1600)
457 #define VSUBUHM VX4(1088)
458 #define VSUBSWS VX4(1920)
459 #define VSUBUWS VX4(1664)
460 #define VSUBUWM VX4(1152)
461 #define VSUBUDM VX4(1216) /* v2.07 */
463 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
464 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
466 #define VMAXSB VX4(258)
467 #define VMAXSH VX4(322)
468 #define VMAXSW VX4(386)
469 #define VMAXSD VX4(450) /* v2.07 */
470 #define VMAXUB VX4(2)
471 #define VMAXUH VX4(66)
472 #define VMAXUW VX4(130)
473 #define VMAXUD VX4(194) /* v2.07 */
474 #define VMINSB VX4(770)
475 #define VMINSH VX4(834)
476 #define VMINSW VX4(898)
477 #define VMINSD VX4(962) /* v2.07 */
478 #define VMINUB VX4(514)
479 #define VMINUH VX4(578)
480 #define VMINUW VX4(642)
481 #define VMINUD VX4(706) /* v2.07 */
483 #define VCMPEQUB VX4(6)
484 #define VCMPEQUH VX4(70)
485 #define VCMPEQUW VX4(134)
486 #define VCMPEQUD VX4(199) /* v2.07 */
487 #define VCMPGTSB VX4(774)
488 #define VCMPGTSH VX4(838)
489 #define VCMPGTSW VX4(902)
490 #define VCMPGTSD VX4(967) /* v2.07 */
491 #define VCMPGTUB VX4(518)
492 #define VCMPGTUH VX4(582)
493 #define VCMPGTUW VX4(646)
494 #define VCMPGTUD VX4(711) /* v2.07 */
495 #define VCMPNEB VX4(7) /* v3.00 */
496 #define VCMPNEH VX4(71) /* v3.00 */
497 #define VCMPNEW VX4(135) /* v3.00 */
499 #define VSLB VX4(260)
500 #define VSLH VX4(324)
501 #define VSLW VX4(388)
502 #define VSLD VX4(1476) /* v2.07 */
503 #define VSRB VX4(516)
504 #define VSRH VX4(580)
505 #define VSRW VX4(644)
506 #define VSRD VX4(1732) /* v2.07 */
507 #define VSRAB VX4(772)
508 #define VSRAH VX4(836)
509 #define VSRAW VX4(900)
510 #define VSRAD VX4(964) /* v2.07 */
513 #define VRLW VX4(132)
514 #define VRLD VX4(196) /* v2.07 */
516 #define VMULEUB VX4(520)
517 #define VMULEUH VX4(584)
518 #define VMULEUW VX4(648) /* v2.07 */
519 #define VMULOUB VX4(8)
520 #define VMULOUH VX4(72)
521 #define VMULOUW VX4(136) /* v2.07 */
522 #define VMULUWM VX4(137) /* v2.07 */
523 #define VMULLD VX4(457) /* v3.10 */
524 #define VMSUMUHM VX4(38)
526 #define VMRGHB VX4(12)
527 #define VMRGHH VX4(76)
528 #define VMRGHW VX4(140)
529 #define VMRGLB VX4(268)
530 #define VMRGLH VX4(332)
531 #define VMRGLW VX4(396)
533 #define VPKUHUM VX4(14)
534 #define VPKUWUM VX4(78)
536 #define VAND VX4(1028)
537 #define VANDC VX4(1092)
538 #define VNOR VX4(1284)
539 #define VOR VX4(1156)
540 #define VXOR VX4(1220)
541 #define VEQV VX4(1668) /* v2.07 */
542 #define VNAND VX4(1412) /* v2.07 */
543 #define VORC VX4(1348) /* v2.07 */
545 #define VSPLTB VX4(524)
546 #define VSPLTH VX4(588)
547 #define VSPLTW VX4(652)
548 #define VSPLTISB VX4(780)
549 #define VSPLTISH VX4(844)
550 #define VSPLTISW VX4(908)
552 #define VSLDOI VX4(44)
554 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
555 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
556 #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
558 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
559 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
560 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
561 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
562 #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
563 #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
565 #define RT(r) ((r)<<21)
566 #define RS(r) ((r)<<21)
567 #define RA(r) ((r)<<16)
568 #define RB(r) ((r)<<11)
569 #define TO(t) ((t)<<21)
570 #define SH(s) ((s)<<11)
571 #define MB(b) ((b)<<6)
572 #define ME(e) ((e)<<1)
573 #define BO(o) ((o)<<21)
574 #define MB64(b) ((b)<<5)
575 #define FXM(b) (1 << (19 - (b)))
577 #define VRT(r) (((r) & 31) << 21)
578 #define VRA(r) (((r) & 31) << 16)
579 #define VRB(r) (((r) & 31) << 11)
580 #define VRC(r) (((r) & 31) << 6)
584 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
585 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
586 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
587 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
589 #define BF(n) ((n)<<23)
590 #define BI(n, c) (((c)+((n)*4))<<16)
591 #define BT(n, c) (((c)+((n)*4))<<21)
592 #define BA(n, c) (((c)+((n)*4))<<16)
593 #define BB(n, c) (((c)+((n)*4))<<11)
594 #define BC_(n, c) (((c)+((n)*4))<<6)
596 #define BO_COND_TRUE BO(12)
597 #define BO_COND_FALSE BO( 4)
598 #define BO_ALWAYS BO(20)
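/*
 * Worked example: BC | BI(7, CR_EQ) | BO_COND_TRUE is 0x419e0000,
 * i.e. "beq cr7, target": BI selects CR bit 30 (cr7's EQ bit) and
 * BO(12) means "branch if that bit is set".
 */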
607 static const uint32_t tcg_to_bc[] = {
608 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
609 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
610 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
611 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
612 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
613 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
614 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
615 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
616 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
617 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
620 /* The low bit here is set if the RA and RB fields must be inverted. */
621 static const uint32_t tcg_to_isel[] = {
622 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
623 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
624 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
625 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
626 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
627 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
628 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
629 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
630 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
631 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
634 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
635 intptr_t value, intptr_t addend)
637 const tcg_insn_unit *target;
642 target = (const tcg_insn_unit *)value;
646 return reloc_pc14(code_ptr, target);
648 return reloc_pc24(code_ptr, target);
651 * We are (slightly) abusing this relocation type. In particular,
652 * assert that the low 2 bits are zero, and do not modify them.
653 * That way we can use this with LD et al that have opcode bits
654 * in the low 2 bits of the insn.
656 if ((value & 3) || value != (int16_t)value) {
659 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
663 * We are abusing this relocation type. Again, this points to
664 * a pair of insns, lis + load. This is an absolute address
665 * relocation for PPC32 so the lis cannot be removed.
669 if (hi + lo != value) {
672 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
673 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
676 g_assert_not_reached();
681 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
682 TCGReg base, tcg_target_long offset);
684 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
691 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
694 if (ret < TCG_REG_V0) {
695 if (arg < TCG_REG_V0) {
696 tcg_out32(s, OR | SAB(arg, ret, arg));
698 } else if (have_isa_2_07) {
699 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
700 | VRT(arg) | RA(ret));
703 /* Altivec does not support vector->integer moves. */
706 } else if (arg < TCG_REG_V0) {
708 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
709 | VRT(ret) | RA(arg));
712 /* Altivec does not support integer->vector moves. */
719 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
720 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
723 g_assert_not_reached();
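/*
 * MD-form rotates split their 6-bit SH and MB fields: sh[0:4] sits in
 * the usual SH slot with sh's top bit at insn bit 1, and the 6-bit mask
 * value is stored with its own top bit rotated into the low bit of the
 * field -- tcg_out_rld below performs that shuffling.
 */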
728 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
731 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
732 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
733 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
734 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
737 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
738 int sh, int mb, int me)
740 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
743 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
745 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
748 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
750 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
753 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
755 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
758 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
760 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
763 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
765 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
768 /* Emit a move into ret of arg, if it can be done in one insn. */
769 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
771 if (arg == (int16_t)arg) {
772 tcg_out32(s, ADDI | TAI(ret, 0, arg));
775 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
776 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
782 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
783 tcg_target_long arg, bool in_prologue)
789 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
791 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
795 /* Load 16-bit immediates with one insn. */
796 if (tcg_out_movi_one(s, ret, arg)) {
800 /* Load addresses within the TB with one insn. */
801 tb_diff = tcg_tbrel_diff(s, (void *)arg);
802 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
803 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
807 /* Load 32-bit immediates with two insns. Note that we've already
808 eliminated bare ADDIS, so we know both insns are required. */
809 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
810 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
811 tcg_out32(s, ORI | SAI(ret, ret, arg));
814 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
815 tcg_out32(s, ADDI | TAI(ret, 0, arg));
816 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
820 /* Load masked 16-bit value. */
821 if (arg > 0 && (arg & 0x8000)) {
823 if ((tmp & (tmp + 1)) == 0) {
824 int mb = clz64(tmp + 1) + 1;
825 tcg_out32(s, ADDI | TAI(ret, 0, arg));
826 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
831 /* Load common masks with 2 insns. */
834 if (tmp == (int16_t)tmp) {
835 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
836 tcg_out_shli64(s, ret, ret, shift);
840 if (tcg_out_movi_one(s, ret, arg << shift)) {
841 tcg_out_shri64(s, ret, ret, shift);
845 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
846 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
847 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
851 /* Use the constant pool, if possible. */
852 if (!in_prologue && USE_REG_TB) {
853 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
854 tcg_tbrel_diff(s, NULL));
855 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
859 tmp = arg >> 31 >> 1;
860 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
862 tcg_out_shli64(s, ret, ret, 32);
864 if (arg & 0xffff0000) {
865 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
868 tcg_out32(s, ORI | SAI(ret, ret, arg));
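/*
 * Worst case above is five insns, e.g. arg = 0x123456789abcdef0:
 * lis+ori build 0x12345678, a 32-bit left shift moves it up, and
 * oris/ori fill in the low halfwords.
 */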
872 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
873 TCGReg ret, int64_t val)
882 if (low >= -16 && low < 16) {
883 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
887 tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
894 if (low >= -16 && low < 16) {
895 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
902 if (low >= -16 && low < 16) {
903 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
910 * Otherwise we must load the value from the constant pool.
914 add = tcg_tbrel_diff(s, NULL);
921 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
922 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
923 if (TCG_TARGET_REG_BITS == 64) {
924 new_pool_label(s, val, rel, s->code_ptr, add);
926 new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
929 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
930 if (TCG_TARGET_REG_BITS == 64) {
931 new_pool_l2(s, rel, s->code_ptr, add, val, val);
933 new_pool_l4(s, rel, s->code_ptr, add,
934 val >> 32, val, val >> 32, val);
939 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
940 load_insn |= RA(TCG_REG_TB);
942 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
943 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
945 tcg_out32(s, load_insn);
948 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
954 tcg_debug_assert(ret < TCG_REG_V0);
955 tcg_out_movi_int(s, type, ret, arg, false);
959 g_assert_not_reached();
963 static bool mask_operand(uint32_t c, int *mb, int *me)
967 /* Accept a bit pattern like:
971 Keep track of the transitions. */
972 if (c == 0 || c == -1) {
978 if (test & (test - 1)) {
983 *mb = test ? clz32(test & -test) + 1 : 0;
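/*
 * Worked example: c = 0x00000ff0 gives lsb = 0x10 and test = 0x1000,
 * hence me = 27 and mb = 20 -- the big-endian bit range 20..27 that
 * rlwinm needs to select exactly those eight bits.
 */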
987 static bool mask64_operand(uint64_t c, int *mb, int *me)
996 /* Accept 1..10..0. */
1002 /* Accept 0..01..1. */
1003 if (lsb == 1 && (c & (c + 1)) == 0) {
1004 *mb = clz64(c + 1) + 1;
1011 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1015 if (mask_operand(c, &mb, &me)) {
1016 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1017 } else if ((c & 0xffff) == c) {
1018 tcg_out32(s, ANDI | SAI(src, dst, c));
1020 } else if ((c & 0xffff0000) == c) {
1021 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1024 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1025 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1029 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1033 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1034 if (mask64_operand(c, &mb, &me)) {
1036 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1038 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1040 } else if ((c & 0xffff) == c) {
1041 tcg_out32(s, ANDI | SAI(src, dst, c));
1043 } else if ((c & 0xffff0000) == c) {
1044 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1047 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1048 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1052 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1053 int op_lo, int op_hi)
1056 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1060 tcg_out32(s, op_lo | SAI(src, dst, c));
1065 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1067 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1070 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1072 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1075 static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1077 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1078 if (in_range_b(disp)) {
1079 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1081 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1082 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1083 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1087 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1088 TCGReg base, tcg_target_long offset)
1090 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1091 bool is_int_store = false;
1092 TCGReg rs = TCG_REG_TMP1;
1099 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1115 case STB: case STH: case STW:
1116 is_int_store = true;
1120 /* For unaligned, or very large offsets, use the indexed form. */
1121 if (offset & align || offset != (int32_t)offset || opi == 0) {
1125 tcg_debug_assert(!is_int_store || rs != rt);
1126 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1127 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1131 l0 = (int16_t)offset;
1132 offset = (offset - l0) >> 16;
1133 l1 = (int16_t)offset;
1135 if (l1 < 0 && orig >= 0) {
1137 l1 = (int16_t)(offset - 0x4000);
1140 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1144 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1147 if (opi != ADDI || base != rt || l0 != 0) {
1148 tcg_out32(s, opi | TAI(rt & 31, base, l0));
1152 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1153 TCGReg va, TCGReg vb, int shb)
1155 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1158 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1159 TCGReg base, intptr_t offset)
1165 if (ret < TCG_REG_V0) {
1166 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1169 if (have_isa_2_07 && have_vsx) {
1170 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1173 tcg_debug_assert((offset & 3) == 0);
1174 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1175 shift = (offset - 4) & 0xc;
1177 tcg_out_vsldoi(s, ret, ret, ret, shift);
1181 if (ret < TCG_REG_V0) {
1182 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1183 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1188 tcg_debug_assert(ret >= TCG_REG_V0);
1190 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1194 tcg_debug_assert((offset & 7) == 0);
1195 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1197 tcg_out_vsldoi(s, ret, ret, ret, 8);
1201 tcg_debug_assert(ret >= TCG_REG_V0);
1202 tcg_debug_assert((offset & 15) == 0);
1203 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1204 LVX, ret, base, offset);
1207 g_assert_not_reached();
1211 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1212 TCGReg base, intptr_t offset)
1218 if (arg < TCG_REG_V0) {
1219 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1222 if (have_isa_2_07 && have_vsx) {
1223 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1227 tcg_debug_assert((offset & 3) == 0);
1228 shift = (offset - 4) & 0xc;
1230 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1233 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1236 if (arg < TCG_REG_V0) {
1237 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1238 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1243 tcg_debug_assert(arg >= TCG_REG_V0);
1245 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1246 STXSDX, arg, base, offset);
1249 tcg_debug_assert((offset & 7) == 0);
1251 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1254 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1255 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1258 tcg_debug_assert(arg >= TCG_REG_V0);
1259 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1260 STVX, arg, base, offset);
1263 g_assert_not_reached();
1267 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1268 TCGReg base, intptr_t ofs)
1273 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1274 int const_arg2, int cr, TCGType type)
1279 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1281 /* Simplify the comparisons below wrt CMPI. */
1282 if (type == TCG_TYPE_I32) {
1283 arg2 = (int32_t)arg2;
1290 if ((int16_t) arg2 == arg2) {
1294 } else if ((uint16_t) arg2 == arg2) {
1309 if ((int16_t) arg2 == arg2) {
1324 if ((uint16_t) arg2 == arg2) {
1337 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1340 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1343 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1346 tcg_out32(s, op | RA(arg1) | RB(arg2));
1350 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1351 TCGReg dst, TCGReg src)
1353 if (type == TCG_TYPE_I32) {
1354 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1355 tcg_out_shri32(s, dst, dst, 5);
1357 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1358 tcg_out_shri64(s, dst, dst, 6);
1362 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1364 /* X != 0 implies X + -1 generates a carry. Extra addition
1365 trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
1367 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1368 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1370 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1371 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
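/* E.g. src = 1: ADDIC yields dst = 0 with CA set, and SUBFE computes
   src + ~dst + CA = 1 + ~0 + 1, wrapping to 1.  For src = 0, ADDIC
   yields -1 with CA clear and the sum collapses to 0. */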
1375 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1379 if ((uint32_t)arg2 == arg2) {
1380 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1382 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1383 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1386 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1391 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1392 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1397 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1399 /* Ignore high bits of a potential constant arg2. */
1400 if (type == TCG_TYPE_I32) {
1401 arg2 = (uint32_t)arg2;
1404 /* Handle common and trivial cases before handling anything else. */
1408 tcg_out_setcond_eq0(s, type, arg0, arg1);
1411 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1412 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1415 tcg_out_setcond_ne0(s, arg0, arg1);
1418 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1422 /* Extract the sign bit. */
1423 if (type == TCG_TYPE_I32) {
1424 tcg_out_shri32(s, arg0, arg1, 31);
1426 tcg_out_shri64(s, arg0, arg1, 63);
1434 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1435 All other cases below are also at least 3 insns, so speed up the
1436 code generator by not considering them and always using ISEL. */
1440 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1442 isel = tcg_to_isel[cond];
1444 tcg_out_movi(s, type, arg0, 1);
1446 /* arg0 = (bc ? 0 : 1) */
1447 tab = TAB(arg0, 0, arg0);
1450 /* arg0 = (bc ? 1 : 0) */
1451 tcg_out_movi(s, type, TCG_REG_R0, 0);
1452 tab = TAB(arg0, arg0, TCG_REG_R0);
1454 tcg_out32(s, isel | tab);
1460 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1461 tcg_out_setcond_eq0(s, type, arg0, arg1);
1465 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1466 /* Discard the high bits only once, rather than both inputs. */
1467 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1468 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1471 tcg_out_setcond_ne0(s, arg0, arg1);
1489 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1495 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1497 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1501 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1502 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
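/* MFOCRF with FXM(7) places cr7 in the low nibble of R0; the rlwinm
   then rotates the desired CR bit (sh = 29/30/31 for LT/GT/EQ) into
   bit 31 and masks everything else off. */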
1510 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1513 bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1515 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1520 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1521 TCGArg arg1, TCGArg arg2, int const_arg2,
1522 TCGLabel *l, TCGType type)
1524 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1525 tcg_out_bc(s, tcg_to_bc[cond], l);
1528 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1529 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1530 TCGArg v2, bool const_c2)
1532 /* If for some reason both inputs are zero, don't produce bad code. */
1533 if (v1 == 0 && v2 == 0) {
1534 tcg_out_movi(s, type, dest, 0);
1538 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1541 int isel = tcg_to_isel[cond];
1543 /* Swap the V operands if the operation indicates inversion. */
1550 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1552 tcg_out_movi(s, type, TCG_REG_R0, 0);
1554 tcg_out32(s, isel | TAB(dest, v1, v2));
1557 cond = tcg_invert_cond(cond);
1559 } else if (dest != v1) {
1561 tcg_out_movi(s, type, dest, 0);
1563 tcg_out_mov(s, type, dest, v1);
1566 /* Branch forward over one insn */
1567 tcg_out32(s, tcg_to_bc[cond] | 8);
1569 tcg_out_movi(s, type, dest, 0);
1571 tcg_out_mov(s, type, dest, v2);
1576 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1577 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1579 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1580 tcg_out32(s, opc | RA(a0) | RS(a1));
1582 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1583 /* Note that the only other valid constant for a2 is 0. */
1585 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1586 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1587 } else if (!const_a2 && a0 == a2) {
1588 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1589 tcg_out32(s, opc | RA(a0) | RS(a1));
1591 tcg_out32(s, opc | RA(a0) | RS(a1));
1592 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1594 tcg_out_movi(s, type, a0, 0);
1596 tcg_out_mov(s, type, a0, a2);
1602 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1603 const int *const_args)
1605 static const struct { uint8_t bit1, bit2; } bits[] = {
1606 [TCG_COND_LT ] = { CR_LT, CR_LT },
1607 [TCG_COND_LE ] = { CR_LT, CR_GT },
1608 [TCG_COND_GT ] = { CR_GT, CR_GT },
1609 [TCG_COND_GE ] = { CR_GT, CR_LT },
1610 [TCG_COND_LTU] = { CR_LT, CR_LT },
1611 [TCG_COND_LEU] = { CR_LT, CR_GT },
1612 [TCG_COND_GTU] = { CR_GT, CR_GT },
1613 [TCG_COND_GEU] = { CR_GT, CR_LT },
1616 TCGCond cond = args[4], cond2;
1617 TCGArg al, ah, bl, bh;
1618 int blconst, bhconst;
1625 blconst = const_args[2];
1626 bhconst = const_args[3];
1635 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1636 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1637 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1648 bit1 = bits[cond].bit1;
1649 bit2 = bits[cond].bit2;
1650 op = (bit1 != bit2 ? CRANDC : CRAND);
1651 cond2 = tcg_unsigned_cond(cond);
1653 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1654 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1655 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1656 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
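/* Net effect: cr7.eq = (high halves compare strictly true) OR
   (high halves equal AND low halves compare true, unsigned) --
   the standard decomposition of a double-word comparison. */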
1664 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1665 const int *const_args)
1667 tcg_out_cmp2(s, args + 1, const_args + 1);
1668 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1669 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1672 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1673 const int *const_args)
1675 tcg_out_cmp2(s, args, const_args);
1676 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1679 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1681 uint32_t insn = HWSYNC;
1683 if (a0 == TCG_MO_LD_LD) {
1685 } else if (a0 == TCG_MO_ST_ST) {
1691 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1692 uintptr_t jmp_rw, uintptr_t addr)
1694 if (TCG_TARGET_REG_BITS == 64) {
1695 tcg_insn_unit i1, i2;
1696 intptr_t tb_diff = addr - tc_ptr;
1697 intptr_t br_diff = addr - (jmp_rx + 4);
1700 /* This does not exercise the range of the branch, but we do
1701 still need to be able to load the new value of TCG_REG_TB.
1702 The short-displacement case does still happen quite often. */
1703 if (tb_diff == (int16_t)tb_diff) {
1704 i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
1705 i2 = B | (br_diff & 0x3fffffc);
1707 intptr_t lo = (int16_t)tb_diff;
1708 intptr_t hi = (int32_t)(tb_diff - lo);
1709 assert(tb_diff == hi + lo);
1710 i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
1711 i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
1713 #ifdef HOST_WORDS_BIGENDIAN
1714 pair = (uint64_t)i1 << 32 | i2;
1716 pair = (uint64_t)i2 << 32 | i1;
1719 /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
1720 within qatomic_set that would fail to build a ppc32 host. */
1721 qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
1722 flush_idcache_range(jmp_rx, jmp_rw, 8);
1724 intptr_t diff = addr - jmp_rx;
1725 tcg_debug_assert(in_range_b(diff));
1726 qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
1727 flush_idcache_range(jmp_rx, jmp_rw, 4);
1731 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
1734 /* Look through the function descriptor. If the branch is in range
1735 and the TOC value fits in 32 bits, both can be set up cheaply. */
1736 const void *tgt = ((const void * const *)target)[0];
1737 uintptr_t toc = ((const uintptr_t *)target)[1];
1738 intptr_t diff = tcg_pcrel_diff(s, tgt);
1740 if (in_range_b(diff) && toc == (uint32_t)toc) {
1741 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1742 tcg_out_b(s, LK, tgt);
1744 /* Fold the low bits of the constant into the addresses below. */
1745 intptr_t arg = (intptr_t)target;
1746 int ofs = (int16_t)arg;
1748 if (ofs + 8 < 0x8000) {
1753 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1754 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1755 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1756 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1757 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1759 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1762 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1763 address, which the callee uses to compute its TOC address. */
1764 /* FIXME: when the branch is in range, we could avoid r12 load if we
1765 knew that the destination uses the same TOC, and what its local
1766 entry point offset is. */
1767 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1769 diff = tcg_pcrel_diff(s, target);
1770 if (in_range_b(diff)) {
1771 tcg_out_b(s, LK, target);
1773 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1774 tcg_out32(s, BCCTR | BO_ALWAYS | LK);
1777 tcg_out_b(s, LK, target);
1781 static const uint32_t qemu_ldx_opc[16] = {
1788 [MO_BSWAP | MO_UB] = LBZX,
1789 [MO_BSWAP | MO_UW] = LHBRX,
1790 [MO_BSWAP | MO_UL] = LWBRX,
1791 [MO_BSWAP | MO_Q] = LDBRX,
1794 static const uint32_t qemu_stx_opc[16] = {
1799 [MO_BSWAP | MO_UB] = STBX,
1800 [MO_BSWAP | MO_UW] = STHBRX,
1801 [MO_BSWAP | MO_UL] = STWBRX,
1802 [MO_BSWAP | MO_Q] = STDBRX,
1805 static const uint32_t qemu_exts_opc[4] = {
1806 EXTSB, EXTSH, EXTSW, 0
1809 #if defined (CONFIG_SOFTMMU)
1810 #include "../tcg-ldst.c.inc"
1812 /* helper signature: helper_ld_mmu(CPUArchState *env, target_ulong addr,
1813 * TCGMemOpIdx oi, uintptr_t ra)
1815 static void * const qemu_ld_helpers[16] = {
1816 [MO_UB] = helper_ret_ldub_mmu,
1817 [MO_LEUW] = helper_le_lduw_mmu,
1818 [MO_LEUL] = helper_le_ldul_mmu,
1819 [MO_LEQ] = helper_le_ldq_mmu,
1820 [MO_BEUW] = helper_be_lduw_mmu,
1821 [MO_BEUL] = helper_be_ldul_mmu,
1822 [MO_BEQ] = helper_be_ldq_mmu,
1825 /* helper signature: helper_st_mmu(CPUArchState *env, target_ulong addr,
1826 * uintxx_t val, TCGMemOpIdx oi, uintptr_t ra)
1828 static void * const qemu_st_helpers[16] = {
1829 [MO_UB] = helper_ret_stb_mmu,
1830 [MO_LEUW] = helper_le_stw_mmu,
1831 [MO_LEUL] = helper_le_stl_mmu,
1832 [MO_LEQ] = helper_le_stq_mmu,
1833 [MO_BEUW] = helper_be_stw_mmu,
1834 [MO_BEUL] = helper_be_stl_mmu,
1835 [MO_BEQ] = helper_be_stq_mmu,
1838 /* We expect to use a 16-bit negative offset from ENV. */
1839 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1840 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1842 /* Perform the TLB load and compare. Places the result of the comparison
1843 in CR7, loads the addend of the TLB into R3, and returns the register
1844 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1846 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1847 TCGReg addrlo, TCGReg addrhi,
1848 int mem_index, bool is_read)
1852 ? offsetof(CPUTLBEntry, addr_read)
1853 : offsetof(CPUTLBEntry, addr_write));
1854 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1855 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1856 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1857 unsigned s_bits = opc & MO_SIZE;
1858 unsigned a_bits = get_alignment_bits(opc);
1860 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1861 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
1862 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
1864 /* Extract the page index, shifted into place for tlb index. */
1865 if (TCG_TARGET_REG_BITS == 32) {
1866 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
1867 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1869 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
1870 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1872 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
1874 /* Load the TLB comparator. */
1875 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
1876 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
1878 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
1880 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
1881 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1882 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
1883 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
1885 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
1889 /* Load the TLB addend for use on the fast path. Do this asap
1890 to minimize any load use delay. */
1891 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
1892 offsetof(CPUTLBEntry, addend));
1894 /* Clear the non-page, non-alignment bits from the address */
1895 if (TCG_TARGET_REG_BITS == 32) {
1896 /* We don't support unaligned accesses on 32-bits.
1897 * Preserve the bottom bits and thus trigger a comparison
1898 * failure on unaligned accesses.
1900 if (a_bits < s_bits) {
1903 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
1904 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1908 /* If the access is unaligned, we need to make sure we fail if we
1909 * cross a page boundary. The trick is to add the access size-1
1910 * to the address before masking the low bits. That will make the
1911 * address overflow to the next page if we cross a page boundary,
1912 * which will then force a mismatch of the TLB compare.
1914 if (a_bits < s_bits) {
1915 unsigned a_mask = (1 << a_bits) - 1;
1916 unsigned s_mask = (1 << s_bits) - 1;
1917 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
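/* E.g. an 8-byte access with 4-byte alignment adds s_mask - a_mask = 4,
   so an address in the last four bytes of a page carries into the next
   page and the masked TLB compare below fails as required. */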
1921 /* Mask the address for the requested alignment. */
1922 if (TARGET_LONG_BITS == 32) {
1923 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
1924 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
1925 /* Zero-extend the address for use in the final address. */
1926 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
1927 addrlo = TCG_REG_R4;
1928 } else if (a_bits == 0) {
1929 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
1931 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
1932 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
1933 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
1937 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1938 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1939 0, 7, TCG_TYPE_I32);
1940 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
1941 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1943 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
1950 /* Record the context of a call to the out of line helper code for the slow
1951 path for a load or store, so that we can later generate the correct
1953 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1954 TCGReg datalo_reg, TCGReg datahi_reg,
1955 TCGReg addrlo_reg, TCGReg addrhi_reg,
1956 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
1958 TCGLabelQemuLdst *label = new_ldst_label(s);
1960 label->is_ld = is_ld;
1962 label->datalo_reg = datalo_reg;
1963 label->datahi_reg = datahi_reg;
1964 label->addrlo_reg = addrlo_reg;
1965 label->addrhi_reg = addrhi_reg;
1966 label->raddr = tcg_splitwx_to_rx(raddr);
1967 label->label_ptr[0] = lptr;
1970 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1972 TCGMemOpIdx oi = lb->oi;
1973 MemOp opc = get_memop(oi);
1974 TCGReg hi, lo, arg = TCG_REG_R3;
1976 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1980 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
1982 lo = lb->addrlo_reg;
1983 hi = lb->addrhi_reg;
1984 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1985 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1988 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
1989 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
1991 /* If the address needed to be zero-extended, we'll have already
1992 placed it in R4. The only remaining case is 64-bit guest. */
1993 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
1996 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
1997 tcg_out32(s, MFSPR | RT(arg) | LR);
1999 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2001 lo = lb->datalo_reg;
2002 hi = lb->datahi_reg;
2003 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2004 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
2005 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
2006 } else if (opc & MO_SIGN) {
2007 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
2008 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
2010 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
2013 tcg_out_b(s, 0, lb->raddr);
2017 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2019 TCGMemOpIdx oi = lb->oi;
2020 MemOp opc = get_memop(oi);
2021 MemOp s_bits = opc & MO_SIZE;
2022 TCGReg hi, lo, arg = TCG_REG_R3;
2024 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2028 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2030 lo = lb->addrlo_reg;
2031 hi = lb->addrhi_reg;
2032 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2033 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2036 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2037 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2039 /* If the address needed to be zero-extended, we'll have already
2040 placed it in R4. The only remaining case is 64-bit guest. */
2041 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2044 lo = lb->datalo_reg;
2045 hi = lb->datahi_reg;
2046 if (TCG_TARGET_REG_BITS == 32) {
2049 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
2052 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2055 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2058 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
2062 if (s_bits == MO_64) {
2063 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
2065 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
2069 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2070 tcg_out32(s, MFSPR | RT(arg) | LR);
2072 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2074 tcg_out_b(s, 0, lb->raddr);
2077 #endif /* SOFTMMU */
2079 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2081 TCGReg datalo, datahi, addrlo, rbase;
2082 TCGReg addrhi __attribute__((unused));
2085 #ifdef CONFIG_SOFTMMU
2087 tcg_insn_unit *label_ptr;
2091 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2093 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2095 opc = get_memop(oi);
2096 s_bits = opc & MO_SIZE;
2098 #ifdef CONFIG_SOFTMMU
2099 mem_index = get_mmuidx(oi);
2100 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2102 /* Remember where the conditional branch-and-link insn is emitted; the slow path patches it. */
2103 label_ptr = s->code_ptr;
2104 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2107 #else /* !CONFIG_SOFTMMU */
2108 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2109 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2110 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2111 addrlo = TCG_REG_TMP1;
2115 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2116 if (opc & MO_BSWAP) {
2117 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2118 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2119 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
2120 } else if (rbase != 0) {
2121 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2122 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
2123 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
2124 } else if (addrlo == datahi) {
2125 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2126 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2128 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2129 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2132 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2133 if (!have_isa_2_06 && insn == LDBRX) {
2134 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2135 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2136 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2137 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2139 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2141 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2142 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2143 insn = qemu_exts_opc[s_bits];
2144 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2148 #ifdef CONFIG_SOFTMMU
2149 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2150 s->code_ptr, label_ptr);
2154 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2156 TCGReg datalo, datahi, addrlo, rbase;
2157 TCGReg addrhi __attribute__((unused));
2160 #ifdef CONFIG_SOFTMMU
2162 tcg_insn_unit *label_ptr;
2166 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2168 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2170 opc = get_memop(oi);
2171 s_bits = opc & MO_SIZE;
2173 #ifdef CONFIG_SOFTMMU
2174 mem_index = get_mmuidx(oi);
2175 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2177 /* Remember where the conditional branch-and-link insn is emitted; the slow path patches it. */
2178 label_ptr = s->code_ptr;
2179 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2182 #else /* !CONFIG_SOFTMMU */
2183 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2184 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2185 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2186 addrlo = TCG_REG_TMP1;
2190 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2191 if (opc & MO_BSWAP) {
2192 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2193 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2194 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2195 } else if (rbase != 0) {
2196 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2197 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2198 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2200 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2201 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2204 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2205 if (!have_isa_2_06 && insn == STDBRX) {
2206 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2207 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2208 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2209 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2211 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2215 #ifdef CONFIG_SOFTMMU
2216 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2217 s->code_ptr, label_ptr);
2221 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2224 for (i = 0; i < count; ++i) {
2229 /* Parameters for function call generation, used in tcg.c. */
2230 #define TCG_TARGET_STACK_ALIGN 16
2231 #define TCG_TARGET_EXTEND_ARGS 1
2234 # define LINK_AREA_SIZE (6 * SZR)
2235 # define LR_OFFSET (1 * SZR)
2236 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2237 #elif defined(TCG_TARGET_CALL_DARWIN)
2238 # define LINK_AREA_SIZE (6 * SZR)
2239 # define LR_OFFSET (2 * SZR)
2240 #elif TCG_TARGET_REG_BITS == 64
2241 # if defined(_CALL_ELF) && _CALL_ELF == 2
2242 # define LINK_AREA_SIZE (4 * SZR)
2243 # define LR_OFFSET (1 * SZR)
2245 #else /* TCG_TARGET_REG_BITS == 32 */
2246 # if defined(_CALL_SYSV)
2247 # define LINK_AREA_SIZE (2 * SZR)
2248 # define LR_OFFSET (1 * SZR)
2252 # error "Unhandled abi"
2254 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2255 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2258 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2259 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2261 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2262 + TCG_STATIC_CALL_ARGS_SIZE \
2263 + CPU_TEMP_BUF_SIZE \
2265 + TCG_TARGET_STACK_ALIGN - 1) \
2266 & -TCG_TARGET_STACK_ALIGN)
2268 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
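/*
 * Resulting frame layout, from low to high address:
 *   [ link area | static call args | CPU_TEMP_BUF | callee-saved regs ]
 * REG_SAVE_BOT is the bottom of the register save area, which sits at
 * the top of the frame.
 */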
2270 static void tcg_target_qemu_prologue(TCGContext *s)
2275 const void **desc = (const void **)s->code_ptr;
2276 desc[0] = tcg_splitwx_to_rx(desc + 2); /* entry point */
2277 desc[1] = 0; /* environment pointer */
2278 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2281 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2285 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2286 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2287 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2289 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2290 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2291 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2293 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2295 #ifndef CONFIG_SOFTMMU
2297 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2298 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2302 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2303 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2305 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2307 tcg_out32(s, BCCTR | BO_ALWAYS);
2310 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2312 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2313 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2314 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2315 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2317 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2318 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2319 tcg_out32(s, BCLR | BO_ALWAYS);
2322 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
2323 const int *const_args)
2329 case INDEX_op_exit_tb:
2330 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
2331 tcg_out_b(s, 0, tcg_code_gen_epilogue);
2333 case INDEX_op_goto_tb:
2334 if (s->tb_jmp_insn_offset) {
2336 if (TCG_TARGET_REG_BITS == 64) {
2337 /* Ensure the next insns are 8-byte aligned. */
2338 if ((uintptr_t)s->code_ptr & 7) {
2341 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2342 tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2343 tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
2345 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
2347 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
2351 /* Indirect jump. */
2352 tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
2353 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
2354 (intptr_t)(s->tb_jmp_target_addr + args[0]));
2356 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2357 tcg_out32(s, BCCTR | BO_ALWAYS);
2358 set_jmp_reset_offset(s, args[0]);
2360 /* For the unlinked case, need to reset TCG_REG_TB. */
2361 tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
2362 -tcg_current_code_size(s));
2365 case INDEX_op_goto_ptr:
2366 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2368 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2370 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2371 tcg_out32(s, BCCTR | BO_ALWAYS);
2375 TCGLabel *l = arg_label(args[0]);
2379 insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2382 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
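    /*
     * Illustrative note: SUBFIC computes "immediate minus register"
     * (rt = si - (ra)), so the constant-minus-register form of sub above
     * needs no scratch register; e.g. with a1 = 32 constant and a2 live
     * in a register, one SUBFIC yields a0 = 32 - a2.  The opposite,
     * register-minus-constant, is instead rewritten as an add of the
     * negated constant via the goto do_addi_32 path.
     */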
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;
    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
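    /*
     * Illustrative note on the "& 31" masking above: the SH field of
     * SRAWI and friends is only 5 bits wide, so an out-of-range TCG
     * immediate such as 32 would overflow into neighbouring encoding
     * bits and assemble as a different instruction.  TCG leaves the
     * result of an out-of-range shift unspecified, so truncating the
     * count (32 & 31 == 0) is a safe way to keep the encoding legal.
     */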
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
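    /*
     * Illustrative note: SRADI takes a 6-bit shift amount, and the
     * encoding splits it -- bits sh[4:0] go in the regular SH field
     * while bit sh[5] lands in a separate low encoding bit, which is
     * what the expression above assembles.  For example a count of
     * 36 (0b100100) becomes SH(4) | (1 << 1).
     */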
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;
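    /*
     * Worked example for the bswap16 sequence above (illustrative):
     * with a1 = 0x00001234, RLWINM rotates left by 24 to 0x34000012 and
     * the mask over bits 24-31 keeps 0x00000012; RLWIMI then inserts
     * (a1 rotl 8) = 0x00123400 masked to bits 16-23, i.e. 0x00003400,
     * producing a0 = 0x00003412 == bswap16(0x1234).
     */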
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;
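    /*
     * Illustrative notes on the deposit cases above.  The constant path
     * only ever sees the value zero (constraint rZ), so depositing a
     * constant degenerates to clearing the field: for pos = 16, len = 8
     * the mask is ((2u << 7) - 1) << 16 = 0x00ff0000.  Writing
     * (2u << (len - 1)) - 1 instead of (1u << len) - 1 avoids an
     * undefined 32-bit shift when len == 32.
     */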
    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit. */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
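    /*
     * Illustrative note on the carry chains above: the low halves are
     * combined with ADDC/SUBFC (which set CA) and the high halves with
     * the extended forms that consume CA.  The rZM constraint limits a
     * constant high part to 0 or -1, which is why ADDZE ("add to zero
     * extended", rA + CA) and ADDME ("add to minus one extended",
     * rA + CA - 1) cover the two constant cases exactly; SUBFZE and
     * SUBFME play the same role for sub2.
     */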
    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
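/*
 * Illustrative note: the return convention above is the generic TCG one --
 * 1 means the opcode is supported natively for that element size, 0 means
 * unsupported, and -1 means it can be synthesized, in which case the
 * middle-end calls back into tcg_expand_vec_op() below to expand it.
 */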
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00. */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup. */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
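/*
 * Illustrative note: the VSPLT element numbers above ((7 << 16) etc. fill
 * the UIM field) follow from the comment about the scalar sitting
 * right-justified in the high doubleword.  In big-endian element
 * numbering a 16-byte register holds byte elements 0..15, so a scalar
 * in the high doubleword occupies byte element 7, halfword element 3,
 * or word element 1 -- exactly the indices splatted for MO_8, MO_16
 * and MO_32.
 */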
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#ifndef HOST_WORDS_BIGENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
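/*
 * Illustrative example of the endian fixups above: a halfword whose
 * offset has low nibble 0x6 sits in big-endian halfword element
 * extract32(offset, 1, 3) = 3 of the loaded quadword.  On a
 * little-endian host the same halfword is element 3 ^ 7 = 4, because
 * the VSPLT element numbers always count from the big-endian end.
 */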
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0 = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0 = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;

    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1;

    if (vece == MO_32) {
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using negative numbers gets us the 4th bit easily.
         */
        imm = sextract32(imm, 0, 5);
    } else {
        imm &= (8 << vece) - 1;
    }

    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
    t1 = tcg_constant_vec(type, MO_8, imm);
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}
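/*
 * Worked example for the sextract32 trick above (illustrative): a MO_32
 * shift count of 24 becomes sextract32(24, 0, 5) = -8, which VSPLTISB
 * can encode; the splatted byte is then 0xf8, and since the vector
 * shift instructions only consult the low 5 bits of each element,
 * 0xf8 & 0x1f is again 24.
 */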
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
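/*
 * Illustrative summary of the expansion above: AltiVec only provides
 * equality and greater-than comparisons (VCMPEQU*, VCMPGTS*, VCMPGTU*,
 * plus VCMPNE* on ISA 3.0), so the remaining conditions are synthesized.
 * For example v1 <= v2 is computed as NOT(v1 > v2) (invert), v1 < v2 as
 * v2 > v1 (swap), and GE/GEU need both: NOT(v2 > v1).
 */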
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec c0, c16;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using -16 is a quick way to represent 16.
         */
        c16 = tcg_constant_vec(type, MO_8, -16);
        c0 = tcg_constant_vec(type, MO_8, 0);

        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
        tcg_gen_add_vec(MO_32, v0, t1, t2);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
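/*
 * Illustrative arithmetic behind the MO_32 expansion above: write each
 * 32-bit lane as x = xh * 2^16 + xl and y = yh * 2^16 + yl.  Then
 * x * y mod 2^32 = xl * yl + ((xh * yl + xl * yh) << 16).  The rotate
 * by 16 swaps y's halfwords, VMSUMUHM forms the cross-product sum
 * xh * yl + xl * yh in each lane, the variable shift moves it up by 16,
 * and VMULOUH supplies the xl * yl term for the final add.
 */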
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rT);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rZW);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, ri, rZ, rZ);
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, ri, ri);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, ri, ri);
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rI, rZM);
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rZM, r, r);
    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O1_I1(r, L)
                : C_O1_I2(r, L, L));

    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O0_I2(S, S)
                : C_O0_I3(S, S, S));

    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
                : C_O2_I2(L, L, L, L));

    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
                : C_O0_I4(S, S, S, S));
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return C_O1_I2(v, v, v);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(v, v);

    case INDEX_op_dup_vec:
        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);

    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return C_O1_I3(v, v, v, v);
    default:
        g_assert_not_reached();
    }
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),    /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};
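/*
 * Illustrative uleb128 example for the FDE above: because FRAME_SIZE is
 * known to fit in 14 bits, it always encodes as exactly two bytes --
 * the low 7 bits with the continuation bit set, then the high 7 bits.
 * A hypothetical FRAME_SIZE of 0x1b0 would encode as 0xb0 0x03,
 * i.e. 0x30 + 3 * 128 = 0x1b0.
 */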
void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
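/*
 * Illustrative note on the loop above: each saved register becomes a
 * DW_CFA_offset instruction, whose opcode byte is 0x80 | regno and whose
 * uleb128 operand is the save slot's offset from the CFA, factored by
 * the CIE's data_align of -SZR.  E.g. for r14 saved at REG_SAVE_BOT,
 * p[0] = 0x8e and p[1] = (FRAME_SIZE - REG_SAVE_BOT) / SZR.
 */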
#endif /* __ELF__ */