2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "../tcg-ldst.c.inc"
28 #include "../tcg-pool.c.inc"
31 #define TCG_CT_CONST_S16 (1 << 8)
32 #define TCG_CT_CONST_S32 (1 << 9)
33 #define TCG_CT_CONST_S33 (1 << 10)
34 #define TCG_CT_CONST_ZERO (1 << 11)
35 #define TCG_CT_CONST_P32 (1 << 12)
36 #define TCG_CT_CONST_INV (1 << 13)
37 #define TCG_CT_CONST_INVRISBG (1 << 14)
39 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
40 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
42 /* Several places within the instruction set 0 means "no register"
43 rather than TCG_REG_R0. */
44 #define TCG_REG_NONE 0
46 /* A scratch register that may be be used throughout the backend. */
47 #define TCG_TMP0 TCG_REG_R1
49 #ifndef CONFIG_SOFTMMU
50 #define TCG_GUEST_BASE_REG TCG_REG_R13
53 /* All of the following instructions are prefixed with their instruction
54 format, and are defined as 8- or 16-bit quantities, even when the two
55 halves of the 16-bit quantity may appear 32 bits apart in the insn.
56 This makes it easy to copy the values from the tables in Appendix B. */
57 typedef enum S390Opcode {
129 RIEg_LOCGHI = 0xec46,
167 RRFa_MSGRKC = 0xb9ed,
189 RRFam_SELGR = 0xb9e3,
193 RRFc_POPCNT = 0xb9e1,
277 VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */
278 VRRc_VCH = 0xe7fb, /* " */
279 VRRc_VCHL = 0xe7f9, /* " */
280 VRRc_VERLLV = 0xe773,
282 VRRc_VESRAV = 0xe77a,
283 VRRc_VESRLV = 0xe778,
296 VRRc_VPKS = 0xe797, /* we leave the m5 cs field 0 */
321 #ifdef CONFIG_DEBUG_TCG
322 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
323 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
324 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
325 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
326 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
327 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
328 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
329 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
333 /* Since R6 is a potential argument register, choose it last of the
334 call-saved registers. Likewise prefer the call-clobbered registers
335 in reverse order to maximize the chance of avoiding the arguments. */
336 static const int tcg_target_reg_alloc_order[] = {
337 /* Call saved registers. */
346 /* Call clobbered registers. */
350 /* Argument registers, in reverse order of allocation. */
356 /* V8-V15 are call saved, and omitted. */
383 static const int tcg_target_call_iarg_regs[] = {
391 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
393 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
394 tcg_debug_assert(slot == 0);
402 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
403 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
404 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
405 #define S390_CC_NEVER 0
406 #define S390_CC_ALWAYS 15
408 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
409 static const uint8_t tcg_cond_to_s390_cond[] = {
410 [TCG_COND_EQ] = S390_CC_EQ,
411 [TCG_COND_NE] = S390_CC_NE,
412 [TCG_COND_LT] = S390_CC_LT,
413 [TCG_COND_LE] = S390_CC_LE,
414 [TCG_COND_GT] = S390_CC_GT,
415 [TCG_COND_GE] = S390_CC_GE,
416 [TCG_COND_LTU] = S390_CC_LT,
417 [TCG_COND_LEU] = S390_CC_LE,
418 [TCG_COND_GTU] = S390_CC_GT,
419 [TCG_COND_GEU] = S390_CC_GE,
422 /* Condition codes that result from a LOAD AND TEST. Here, we have no
423 unsigned instruction variation, however since the test is vs zero we
424 can re-map the outcomes appropriately. */
425 static const uint8_t tcg_cond_to_ltr_cond[] = {
426 [TCG_COND_EQ] = S390_CC_EQ,
427 [TCG_COND_NE] = S390_CC_NE,
428 [TCG_COND_LT] = S390_CC_LT,
429 [TCG_COND_LE] = S390_CC_LE,
430 [TCG_COND_GT] = S390_CC_GT,
431 [TCG_COND_GE] = S390_CC_GE,
432 [TCG_COND_LTU] = S390_CC_NEVER,
433 [TCG_COND_LEU] = S390_CC_EQ,
434 [TCG_COND_GTU] = S390_CC_NE,
435 [TCG_COND_GEU] = S390_CC_ALWAYS,
438 static const tcg_insn_unit *tb_ret_addr;
439 uint64_t s390_facilities[3];
441 static inline bool is_general_reg(TCGReg r)
443 return r <= TCG_REG_R15;
446 static inline bool is_vector_reg(TCGReg r)
448 return r >= TCG_REG_V0 && r <= TCG_REG_V31;
451 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
452 intptr_t value, intptr_t addend)
454 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
459 pcrel2 = (tcg_insn_unit *)value - src_rx;
463 if (pcrel2 == (int16_t)pcrel2) {
464 tcg_patch16(src_rw, pcrel2);
469 if (pcrel2 == (int32_t)pcrel2) {
470 tcg_patch32(src_rw, pcrel2);
475 if (value == sextract64(value, 0, 20)) {
476 old = *(uint32_t *)src_rw & 0xf00000ff;
477 old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
478 tcg_patch32(src_rw, old);
483 g_assert_not_reached();
488 static int is_const_p16(uint64_t val)
490 for (int i = 0; i < 4; ++i) {
491 uint64_t mask = 0xffffull << (i * 16);
492 if ((val & ~mask) == 0) {
499 static int is_const_p32(uint64_t val)
501 if ((val & 0xffffffff00000000ull) == 0) {
504 if ((val & 0x00000000ffffffffull) == 0) {
511 * Accept bit patterns like these:
516 * Copied from gcc sources.
518 static bool risbg_mask(uint64_t c)
521 /* We don't change the number of transitions by inverting,
522 so make sure we start with the LSB zero. */
526 /* Reject all zeros or all ones. */
530 /* Find the first transition. */
532 /* Invert to look for a second transition. */
534 /* Erase the first transition. */
536 /* Find the second transition, if any. */
538 /* Match if all the bits are 1's, or if c is zero. */
542 /* Test if a constant matches the constraint. */
543 static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
545 if (ct & TCG_CT_CONST) {
549 if (type == TCG_TYPE_I32) {
553 /* The following are mutually exclusive. */
554 if (ct & TCG_CT_CONST_S16) {
555 return val == (int16_t)val;
556 } else if (ct & TCG_CT_CONST_S32) {
557 return val == (int32_t)val;
558 } else if (ct & TCG_CT_CONST_S33) {
559 return val >= -0xffffffffll && val <= 0xffffffffll;
560 } else if (ct & TCG_CT_CONST_ZERO) {
564 if (ct & TCG_CT_CONST_INV) {
568 * Note that is_const_p16 is a subset of is_const_p32,
569 * so we don't need both constraints.
571 if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
574 if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
581 /* Emit instructions according to the given instruction format. */
583 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
585 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
588 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
589 TCGReg r1, TCGReg r2)
591 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
594 /* RRF-a without the m4 field */
595 static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
596 TCGReg r1, TCGReg r2, TCGReg r3)
598 tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
601 /* RRF-a with the m4 field */
602 static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
603 TCGReg r1, TCGReg r2, TCGReg r3, int m4)
605 tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
608 static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
609 TCGReg r1, TCGReg r2, int m3)
611 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
614 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
616 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
619 static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
622 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
623 tcg_out32(s, (i2 << 16) | (op & 0xff));
626 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
628 tcg_out16(s, op | (r1 << 4));
632 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
633 TCGReg b2, TCGReg r3, int disp)
635 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
639 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
640 TCGReg b2, TCGReg r3, int disp)
642 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
643 tcg_out32(s, (op & 0xff) | (b2 << 28)
644 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
647 #define tcg_out_insn_RX tcg_out_insn_RS
648 #define tcg_out_insn_RXY tcg_out_insn_RSY
650 static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
653 * Shift bit 4 of each regno to its corresponding bit of RXB.
654 * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
655 * is the left-shift of the 4th operand.
657 return ((v1 & 0x10) << (4 + 3))
658 | ((v2 & 0x10) << (4 + 2))
659 | ((v3 & 0x10) << (4 + 1))
660 | ((v4 & 0x10) << (4 + 0));
663 static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
664 TCGReg v1, uint16_t i2, int m3)
666 tcg_debug_assert(is_vector_reg(v1));
667 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
669 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
672 static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
673 TCGReg v1, uint8_t i2, uint8_t i3, int m4)
675 tcg_debug_assert(is_vector_reg(v1));
676 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
677 tcg_out16(s, (i2 << 8) | (i3 & 0xff));
678 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
681 static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
682 TCGReg v1, uint16_t i2, TCGReg v3, int m4)
684 tcg_debug_assert(is_vector_reg(v1));
685 tcg_debug_assert(is_vector_reg(v3));
686 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
688 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
691 static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
692 TCGReg v1, TCGReg v2, int m3)
694 tcg_debug_assert(is_vector_reg(v1));
695 tcg_debug_assert(is_vector_reg(v2));
696 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
697 tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
700 static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
701 TCGReg v1, TCGReg v2, TCGReg v3, int m4)
703 tcg_debug_assert(is_vector_reg(v1));
704 tcg_debug_assert(is_vector_reg(v2));
705 tcg_debug_assert(is_vector_reg(v3));
706 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
707 tcg_out16(s, v3 << 12);
708 tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
711 static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
712 TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
714 tcg_debug_assert(is_vector_reg(v1));
715 tcg_debug_assert(is_vector_reg(v2));
716 tcg_debug_assert(is_vector_reg(v3));
717 tcg_debug_assert(is_vector_reg(v4));
718 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
719 tcg_out16(s, v3 << 12);
720 tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
723 static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
724 TCGReg v1, TCGReg r2, TCGReg r3)
726 tcg_debug_assert(is_vector_reg(v1));
727 tcg_debug_assert(is_general_reg(r2));
728 tcg_debug_assert(is_general_reg(r3));
729 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
730 tcg_out16(s, r3 << 12);
731 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
734 static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
735 intptr_t d2, TCGReg b2, TCGReg v3, int m4)
737 tcg_debug_assert(is_vector_reg(v1));
738 tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
739 tcg_debug_assert(is_general_reg(b2));
740 tcg_debug_assert(is_vector_reg(v3));
741 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
742 tcg_out16(s, b2 << 12 | d2);
743 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
746 static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
747 intptr_t d2, TCGReg b2, TCGReg r3, int m4)
749 tcg_debug_assert(is_vector_reg(v1));
750 tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
751 tcg_debug_assert(is_general_reg(b2));
752 tcg_debug_assert(is_general_reg(r3));
753 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
754 tcg_out16(s, b2 << 12 | d2);
755 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
758 static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
759 intptr_t d2, TCGReg b2, TCGReg v3, int m4)
761 tcg_debug_assert(is_general_reg(r1));
762 tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
763 tcg_debug_assert(is_general_reg(b2));
764 tcg_debug_assert(is_vector_reg(v3));
765 tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
766 tcg_out16(s, b2 << 12 | d2);
767 tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12));
770 static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
771 TCGReg b2, TCGReg x2, intptr_t d2, int m3)
773 tcg_debug_assert(is_vector_reg(v1));
774 tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
775 tcg_debug_assert(is_general_reg(x2));
776 tcg_debug_assert(is_general_reg(b2));
777 tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
778 tcg_out16(s, (b2 << 12) | d2);
779 tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
782 /* Emit an opcode with "type-checking" of the format. */
783 #define tcg_out_insn(S, FMT, OP, ...) \
784 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
787 /* emit 64-bit shifts */
788 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
789 TCGReg src, TCGReg sh_reg, int sh_imm)
791 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
794 /* emit 32-bit shifts */
795 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
796 TCGReg sh_reg, int sh_imm)
798 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
801 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
808 if (likely(is_general_reg(dst) && is_general_reg(src))) {
809 tcg_out_insn(s, RR, LR, dst, src);
815 if (likely(is_general_reg(dst))) {
816 if (likely(is_general_reg(src))) {
817 tcg_out_insn(s, RRE, LGR, dst, src);
819 tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
822 } else if (is_general_reg(src)) {
823 tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
830 tcg_out_insn(s, VRRa, VLR, dst, src, 0);
834 g_assert_not_reached();
839 static const S390Opcode li_insns[4] = {
840 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
842 static const S390Opcode oi_insns[4] = {
843 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
845 static const S390Opcode lif_insns[2] = {
846 RIL_LLILF, RIL_LLIHF,
849 /* load a register with an immediate value */
850 static void tcg_out_movi(TCGContext *s, TCGType type,
851 TCGReg ret, tcg_target_long sval)
853 tcg_target_ulong uval = sval;
857 if (type == TCG_TYPE_I32) {
858 uval = (uint32_t)sval;
859 sval = (int32_t)sval;
862 /* Try all 32-bit insns that can load it in one go. */
863 if (sval >= -0x8000 && sval < 0x8000) {
864 tcg_out_insn(s, RI, LGHI, ret, sval);
868 i = is_const_p16(uval);
870 tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
874 /* Try all 48-bit insns that can load it in one go. */
875 if (sval == (int32_t)sval) {
876 tcg_out_insn(s, RIL, LGFI, ret, sval);
880 i = is_const_p32(uval);
882 tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
886 /* Try for PC-relative address load. For odd addresses, add one. */
887 pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
888 if (pc_off == (int32_t)pc_off) {
889 tcg_out_insn(s, RIL, LARL, ret, pc_off);
891 tcg_out_insn(s, RI, AGHI, ret, 1);
896 /* Otherwise, load it by parts. */
897 i = is_const_p16((uint32_t)uval);
899 tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
901 tcg_out_insn(s, RIL, LLILF, ret, uval);
904 i = is_const_p16(uval);
906 tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
908 tcg_out_insn(s, RIL, OIHF, ret, uval);
912 /* Emit a load/store type instruction. Inputs are:
913 DATA: The register to be loaded or stored.
914 BASE+OFS: The effective address.
915 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
916 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
918 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
919 TCGReg data, TCGReg base, TCGReg index,
922 if (ofs < -0x80000 || ofs >= 0x80000) {
923 /* Combine the low 20 bits of the offset with the actual load insn;
924 the high 44 bits must come from an immediate load. */
925 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
926 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
929 /* If we were already given an index register, add it in. */
930 if (index != TCG_REG_NONE) {
931 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
936 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
937 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
939 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
943 static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
944 TCGReg data, TCGReg base, TCGReg index,
945 tcg_target_long ofs, int m3)
947 if (ofs < 0 || ofs >= 0x1000) {
948 if (ofs >= -0x80000 && ofs < 0x80000) {
949 tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
951 index = TCG_REG_NONE;
954 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
955 if (index != TCG_REG_NONE) {
956 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
962 tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
965 /* load data without address translation or endianness conversion */
966 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
967 TCGReg base, intptr_t ofs)
971 if (likely(is_general_reg(data))) {
972 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
975 tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
979 if (likely(is_general_reg(data))) {
980 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
986 tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
990 /* Hint quadword aligned. */
991 tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
995 g_assert_not_reached();
999 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
1000 TCGReg base, intptr_t ofs)
1004 if (likely(is_general_reg(data))) {
1005 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
1007 tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
1012 if (likely(is_general_reg(data))) {
1013 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
1019 tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
1023 /* Hint quadword aligned. */
1024 tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
1028 g_assert_not_reached();
1032 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1033 TCGReg base, intptr_t ofs)
1038 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1043 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1044 tcg_target_long imm)
1046 /* This function is only used for passing structs by reference. */
1047 tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
1050 static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
1051 int msb, int lsb, int ofs, int z)
1054 tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
1055 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
1056 tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
1059 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1061 tcg_out_insn(s, RRE, LGBR, dest, src);
1064 static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
1066 tcg_out_insn(s, RRE, LLGCR, dest, src);
1069 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1071 tcg_out_insn(s, RRE, LGHR, dest, src);
1074 static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
1076 tcg_out_insn(s, RRE, LLGHR, dest, src);
1079 static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
1081 tcg_out_insn(s, RRE, LGFR, dest, src);
1084 static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
1086 tcg_out_insn(s, RRE, LLGFR, dest, src);
1089 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
1091 tcg_out_ext32s(s, dest, src);
1094 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
1096 tcg_out_ext32u(s, dest, src);
1099 static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
1101 tcg_out_mov(s, TCG_TYPE_I32, dest, src);
1104 static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
1107 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
1108 /* Achieve wraparound by swapping msb and lsb. */
1109 msb = 64 - ctz64(~val);
1110 lsb = clz64(~val) - 1;
1113 lsb = 63 - ctz64(val);
1115 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
1118 static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
1120 static const S390Opcode ni_insns[4] = {
1121 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
1123 static const S390Opcode nif_insns[2] = {
1126 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
1129 /* Look for the zero-extensions. */
1130 if ((val & valid) == 0xffffffff) {
1131 tcg_out_ext32u(s, dest, dest);
1134 if ((val & valid) == 0xff) {
1135 tcg_out_ext8u(s, dest, dest);
1138 if ((val & valid) == 0xffff) {
1139 tcg_out_ext16u(s, dest, dest);
1143 i = is_const_p16(~val & valid);
1145 tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
1149 i = is_const_p32(~val & valid);
1150 tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
1152 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
1156 if (risbg_mask(val)) {
1157 tgen_andi_risbg(s, dest, dest, val);
1161 g_assert_not_reached();
1164 static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
1166 static const S390Opcode oif_insns[2] = {
1172 i = is_const_p16(val);
1174 tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
1178 i = is_const_p32(val);
1180 tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
1184 g_assert_not_reached();
1187 static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
1189 switch (is_const_p32(val)) {
1191 tcg_out_insn(s, RIL, XILF, dest, val);
1194 tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1197 g_assert_not_reached();
1201 static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1202 TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
1204 bool is_unsigned = is_unsigned_cond(c);
1205 TCGCond inv_c = tcg_invert_cond(c);
1210 if (!(is_unsigned && need_carry)) {
1211 if (type == TCG_TYPE_I32) {
1212 tcg_out_insn(s, RR, LTR, r1, r1);
1214 tcg_out_insn(s, RRE, LTGR, r1, r1);
1216 *inv_cc = tcg_cond_to_ltr_cond[inv_c];
1217 return tcg_cond_to_ltr_cond[c];
1221 if (!is_unsigned && c2 == (int16_t)c2) {
1222 op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
1223 tcg_out_insn_RI(s, op, r1, c2);
1227 if (type == TCG_TYPE_I32) {
1228 op = (is_unsigned ? RIL_CLFI : RIL_CFI);
1229 tcg_out_insn_RIL(s, op, r1, c2);
1234 * Constraints are for a signed 33-bit operand, which is a
1235 * convenient superset of this signed/unsigned test.
1237 if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
1238 op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
1239 tcg_out_insn_RIL(s, op, r1, c2);
1243 /* Load everything else into a register. */
1244 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
1248 if (type == TCG_TYPE_I32) {
1249 op = (is_unsigned ? RR_CLR : RR_CR);
1250 tcg_out_insn_RR(s, op, r1, c2);
1252 op = (is_unsigned ? RRE_CLGR : RRE_CGR);
1253 tcg_out_insn_RRE(s, op, r1, c2);
1257 *inv_cc = tcg_cond_to_s390_cond[inv_c];
1258 return tcg_cond_to_s390_cond[c];
1261 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1262 TCGArg c2, bool c2const, bool need_carry)
1265 return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
1268 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
1269 TCGReg dest, TCGReg c1, TCGArg c2,
1270 bool c2const, bool neg)
1274 /* With LOC2, we can always emit the minimum 3 insns. */
1275 if (HAVE_FACILITY(LOAD_ON_COND2)) {
1276 /* Emit: d = 0, d = (cc ? 1 : d). */
1277 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1278 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1279 tcg_out_insn(s, RIEg, LOCGHI, dest, neg ? -1 : 1, cc);
1288 /* Swap operands so that we can use LEU/GTU/GT/LE. */
1293 cond = tcg_swap_cond(cond);
1302 /* X != 0 is X > 0. */
1303 if (c2const && c2 == 0) {
1304 cond = TCG_COND_GTU;
1313 * The result of a compare has CC=2 for GT and CC=3 unused.
1314 * ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.
1316 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1317 tcg_out_movi(s, type, dest, 0);
1318 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1320 if (type == TCG_TYPE_I32) {
1321 tcg_out_insn(s, RR, LCR, dest, dest);
1323 tcg_out_insn(s, RRE, LCGR, dest, dest);
1329 /* X == 0 is X <= 0. */
1330 if (c2const && c2 == 0) {
1331 cond = TCG_COND_LEU;
1340 * As above, but we're looking for borrow, or !carry.
1341 * The second insn computes d - d - borrow, or -1 for true
1342 * and 0 for false. So we must mask to 1 bit afterward.
1344 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1345 tcg_out_insn(s, RRE, SLBGR, dest, dest);
1347 tgen_andi(s, type, dest, 1);
1352 g_assert_not_reached();
1355 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1356 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1357 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1358 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, neg ? -1 : 1);
1359 tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
1362 static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
1363 TCGArg v3, int v3const, TCGReg v4,
1370 if (HAVE_FACILITY(LOAD_ON_COND2)) {
1371 /* Emit: if (cc) dest = v3. */
1372 tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
1375 tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
1378 /* LGR+LOCGHI is larger than LGHI+LOCGR. */
1379 tcg_out_insn(s, RI, LGHI, dest, v3);
1384 if (HAVE_FACILITY(MISC_INSN_EXT3)) {
1385 /* Emit: dest = cc ? v3 : v4. */
1386 tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
1392 tcg_out_mov(s, type, dest, v3);
1398 /* Emit: if (cc) dest = src. */
1399 tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
1402 static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1403 TCGReg c1, TCGArg c2, int c2const,
1404 TCGArg v3, int v3const, TCGReg v4)
1408 cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
1409 tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
1412 static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1413 TCGArg a2, int a2const)
1415 /* Since this sets both R and R+1, we have no choice but to store the
1416 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1417 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1418 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1420 if (a2const && a2 == 64) {
1421 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1426 * Conditions from FLOGR are:
1427 * 2 -> one bit found
1428 * 8 -> no one bit found
1430 tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
1433 static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1435 /* With MIE3, and bit 0 of m4 set, we get the complete result. */
1436 if (HAVE_FACILITY(MISC_INSN_EXT3)) {
1437 if (type == TCG_TYPE_I32) {
1438 tcg_out_ext32u(s, dest, src);
1441 tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
1445 /* Without MIE3, each byte gets the count of bits for the byte. */
1446 tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
1448 /* Multiply to sum each byte at the top of the word. */
1449 if (type == TCG_TYPE_I32) {
1450 tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
1451 tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
1453 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
1454 tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
1455 tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
1459 static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1460 int ofs, int len, int z)
1462 int lsb = (63 - ofs);
1463 int msb = lsb - (len - 1);
1464 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
1467 static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1470 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1473 static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
1475 ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1476 if (off == (int16_t)off) {
1477 tcg_out_insn(s, RI, BRC, cc, off);
1478 } else if (off == (int32_t)off) {
1479 tcg_out_insn(s, RIL, BRCL, cc, off);
1481 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1482 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1486 static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
1489 tgen_gotoi(s, cc, l->u.value_ptr);
1491 tcg_out16(s, RI_BRC | (cc << 4));
1492 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
1497 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1498 TCGReg r1, TCGReg r2, TCGLabel *l)
1500 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1502 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1504 tcg_out16(s, cc << 12 | (opc & 0xff));
1507 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1508 TCGReg r1, int i2, TCGLabel *l)
1510 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1512 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1514 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1517 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1518 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1521 bool is_unsigned = is_unsigned_cond(c);
1525 cc = tcg_cond_to_s390_cond[c];
1528 opc = (type == TCG_TYPE_I32
1529 ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
1530 : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
1531 tgen_compare_branch(s, opc, cc, r1, c2, l);
1536 * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1537 * If the immediate we've been given does not fit that range, we'll
1538 * fall back to separate compare and branch instructions using the
1539 * larger comparison range afforded by COMPARE IMMEDIATE.
1541 if (type == TCG_TYPE_I32) {
1544 in_range = (uint32_t)c2 == (uint8_t)c2;
1547 in_range = (int32_t)c2 == (int8_t)c2;
1552 in_range = (uint64_t)c2 == (uint8_t)c2;
1555 in_range = (int64_t)c2 == (int8_t)c2;
1559 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1563 cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
1564 tgen_branch(s, cc, l);
1567 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
1569 ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1570 if (off == (int32_t)off) {
1571 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1573 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1574 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1578 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
1579 const TCGHelperInfo *info)
1581 tcg_out_call_int(s, dest);
1591 bool tcg_target_has_memory_bswap(MemOp memop)
1595 if ((memop & MO_SIZE) <= MO_64) {
1600 * Reject 16-byte memop with 16-byte atomicity,
1601 * but do allow a pair of 64-bit operations.
1603 aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
1604 return aa.atom <= MO_64;
1607 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
1610 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1612 tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
1615 tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
1618 case MO_UW | MO_BSWAP:
1619 /* swapped unsigned halfword load with upper bits zeroed */
1620 tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1621 tcg_out_ext16u(s, data, data);
1624 tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
1627 case MO_SW | MO_BSWAP:
1628 /* swapped sign-extended halfword load */
1629 tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1630 tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
1633 tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
1636 case MO_UL | MO_BSWAP:
1637 /* swapped unsigned int load with upper bits zeroed */
1638 tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1639 tcg_out_ext32u(s, data, data);
1642 tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
1645 case MO_SL | MO_BSWAP:
1646 /* swapped sign-extended int load */
1647 tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1648 tcg_out_ext32s(s, data, data);
1651 tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
1654 case MO_UQ | MO_BSWAP:
1655 tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
1658 tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
1662 g_assert_not_reached();
1666 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
1669 switch (opc & (MO_SIZE | MO_BSWAP)) {
1671 if (h.disp >= 0 && h.disp < 0x1000) {
1672 tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
1674 tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
1678 case MO_UW | MO_BSWAP:
1679 tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
1682 if (h.disp >= 0 && h.disp < 0x1000) {
1683 tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
1685 tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
1689 case MO_UL | MO_BSWAP:
1690 tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
1693 if (h.disp >= 0 && h.disp < 0x1000) {
1694 tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
1696 tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
1700 case MO_UQ | MO_BSWAP:
1701 tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
1704 tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
1708 g_assert_not_reached();
1712 static const TCGLdstHelperParam ldst_helper_param = {
1713 .ntmp = 1, .tmp = { TCG_TMP0 }
1716 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1718 MemOp opc = get_memop(lb->oi);
1720 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1721 (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1725 tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1726 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
1727 tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1729 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1733 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1735 MemOp opc = get_memop(lb->oi);
1737 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1738 (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1742 tcg_out_st_helper_args(s, lb, &ldst_helper_param);
1743 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);
1745 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1749 /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
1750 #define MIN_TLB_MASK_TABLE_OFS -(1 << 19)
1753 * For softmmu, perform the TLB load and compare.
1754 * For useronly, perform any required alignment tests.
1755 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1756 * is required and fill in @h with the host address for the fast path.
1758 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
1759 TCGReg addr_reg, MemOpIdx oi,
1762 TCGType addr_type = s->addr_type;
1763 TCGLabelQemuLdst *ldst = NULL;
1764 MemOp opc = get_memop(oi);
1765 MemOp s_bits = opc & MO_SIZE;
1768 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
1769 a_mask = (1 << h->aa.align) - 1;
1771 #ifdef CONFIG_SOFTMMU
1772 unsigned s_mask = (1 << s_bits) - 1;
1773 int mem_index = get_mmuidx(oi);
1774 int fast_off = tlb_mask_table_ofs(s, mem_index);
1775 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1776 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1780 ldst = new_ldst_label(s);
1781 ldst->is_ld = is_ld;
1783 ldst->addrlo_reg = addr_reg;
1785 tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
1786 s->page_bits - CPU_TLB_ENTRY_BITS);
1788 tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
1789 tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
1792 * For aligned accesses, we check the first byte and include the alignment
1793 * bits within the address. For unaligned access, we check that we don't
1794 * cross pages using the address of the last byte of the access.
1796 a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
1797 tlb_mask = (uint64_t)s->page_mask | a_mask;
1799 tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
1801 tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
1802 tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
1806 ofs = offsetof(CPUTLBEntry, addr_read);
1808 ofs = offsetof(CPUTLBEntry, addr_write);
1810 if (addr_type == TCG_TYPE_I32) {
1811 ofs += HOST_BIG_ENDIAN * 4;
1812 tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1814 tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1817 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1818 ldst->label_ptr[0] = s->code_ptr++;
1820 h->index = TCG_TMP0;
1821 tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
1822 offsetof(CPUTLBEntry, addend));
1824 if (addr_type == TCG_TYPE_I32) {
1825 tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
1826 h->base = TCG_REG_NONE;
1833 ldst = new_ldst_label(s);
1834 ldst->is_ld = is_ld;
1836 ldst->addrlo_reg = addr_reg;
1838 /* We are expecting a_bits to max out at 7, much lower than TMLL. */
1839 tcg_debug_assert(a_mask <= 0xffff);
1840 tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
1842 tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
1843 ldst->label_ptr[0] = s->code_ptr++;
1847 if (addr_type == TCG_TYPE_I32) {
1848 tcg_out_ext32u(s, TCG_TMP0, addr_reg);
1851 if (guest_base < 0x80000) {
1852 h->index = TCG_REG_NONE;
1853 h->disp = guest_base;
1855 h->index = TCG_GUEST_BASE_REG;
1863 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1864 MemOpIdx oi, TCGType data_type)
1866 TCGLabelQemuLdst *ldst;
1869 ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1870 tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
1873 ldst->type = data_type;
1874 ldst->datalo_reg = data_reg;
1875 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1879 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1880 MemOpIdx oi, TCGType data_type)
1882 TCGLabelQemuLdst *ldst;
1885 ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1886 tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1889 ldst->type = data_type;
1890 ldst->datalo_reg = data_reg;
1891 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1895 static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
1896 TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1898 TCGLabel *l1 = NULL, *l2 = NULL;
1899 TCGLabelQemuLdst *ldst;
1905 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1907 use_pair = h.aa.atom < MO_128;
1908 need_bswap = get_memop(oi) & MO_BSWAP;
1912 * Atomicity requires we use LPQ. If we've already checked for
1913 * 16-byte alignment, that's all we need. If we arrive with
1914 * lesser alignment, we have determined that less than 16-byte
1915 * alignment can be satisfied with two 8-byte loads.
1917 if (h.aa.align < MO_128) {
1919 l1 = gen_new_label();
1920 l2 = gen_new_label();
1922 tcg_out_insn(s, RI, TMLL, addr_reg, 15);
1923 tgen_branch(s, 7, l1); /* CC in {1,2,3} */
1926 tcg_debug_assert(!need_bswap);
1927 tcg_debug_assert(datalo & 1);
1928 tcg_debug_assert(datahi == datalo - 1);
1929 insn = is_ld ? RXY_LPQ : RXY_STPQ;
1930 tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp);
1933 tgen_branch(s, S390_CC_ALWAYS, l2);
1934 tcg_out_label(s, l1);
1941 d1 = datalo, d2 = datahi;
1942 insn = is_ld ? RXY_LRVG : RXY_STRVG;
1944 d1 = datahi, d2 = datalo;
1945 insn = is_ld ? RXY_LG : RXY_STG;
1948 if (h.base == d1 || h.index == d1) {
1949 tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp);
1951 h.index = TCG_REG_NONE;
1954 tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp);
1955 tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8);
1958 tcg_out_label(s, l2);
1962 ldst->type = TCG_TYPE_I128;
1963 ldst->datalo_reg = datalo;
1964 ldst->datahi_reg = datahi;
1965 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1969 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1971 /* Reuse the zeroing that exists for goto_ptr. */
1973 tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
1975 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1976 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1980 static void tcg_out_goto_tb(TCGContext *s, int which)
1983 * Branch displacement must be aligned for atomic patching;
1984 * see if we need to add extra nop before branch
1986 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1989 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1990 set_jmp_insn_offset(s, which);
1992 set_jmp_reset_offset(s, which);
1995 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1996 uintptr_t jmp_rx, uintptr_t jmp_rw)
1998 if (!HAVE_FACILITY(GEN_INST_EXT)) {
2001 /* patch the branch destination */
2002 uintptr_t addr = tb->jmp_target_addr[n];
2003 intptr_t disp = addr - (jmp_rx - 2);
2004 qatomic_set((int32_t *)jmp_rw, disp / 2);
2005 /* no need to flush icache explicitly */
2008 # define OP_32_64(x) \
2009 case glue(glue(INDEX_op_,x),_i32): \
2010 case glue(glue(INDEX_op_,x),_i64)
2012 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2013 const TCGArg args[TCG_MAX_OP_ARGS],
2014 const int const_args[TCG_MAX_OP_ARGS])
2020 case INDEX_op_goto_ptr:
2022 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
2026 /* ??? LLC (RXY format) is only present with the extended-immediate
2027 facility, whereas LLGC is always present. */
2028 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
2032 /* ??? LB is no smaller than LGB, so no point to using it. */
2033 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
2037 /* ??? LLH (RXY format) is only present with the extended-immediate
2038 facility, whereas LLGH is always present. */
2039 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
2042 case INDEX_op_ld16s_i32:
2043 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
2046 case INDEX_op_ld_i32:
2047 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2051 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
2052 TCG_REG_NONE, args[2]);
2056 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
2057 TCG_REG_NONE, args[2]);
2060 case INDEX_op_st_i32:
2061 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2064 case INDEX_op_add_i32:
2065 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2066 if (const_args[2]) {
2069 if (a2 == (int16_t)a2) {
2070 tcg_out_insn(s, RI, AHI, a0, a2);
2073 tcg_out_insn(s, RIL, AFI, a0, a2);
2076 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2077 } else if (a0 == a1) {
2078 tcg_out_insn(s, RR, AR, a0, a2);
2080 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2083 case INDEX_op_sub_i32:
2084 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2085 if (const_args[2]) {
2088 } else if (a0 == a1) {
2089 tcg_out_insn(s, RR, SR, a0, a2);
2091 tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
2095 case INDEX_op_and_i32:
2096 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2097 if (const_args[2]) {
2098 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2099 tgen_andi(s, TCG_TYPE_I32, a0, a2);
2100 } else if (a0 == a1) {
2101 tcg_out_insn(s, RR, NR, a0, a2);
2103 tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
2106 case INDEX_op_or_i32:
2107 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2108 if (const_args[2]) {
2109 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2110 tgen_ori(s, a0, a2);
2111 } else if (a0 == a1) {
2112 tcg_out_insn(s, RR, OR, a0, a2);
2114 tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
2117 case INDEX_op_xor_i32:
2118 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2119 if (const_args[2]) {
2120 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2121 tcg_out_insn(s, RIL, XILF, a0, a2);
2122 } else if (a0 == a1) {
2123 tcg_out_insn(s, RR, XR, args[0], args[2]);
2125 tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
2129 case INDEX_op_andc_i32:
2130 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2131 if (const_args[2]) {
2132 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2133 tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
2135 tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
2138 case INDEX_op_orc_i32:
2139 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2140 if (const_args[2]) {
2141 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2142 tgen_ori(s, a0, (uint32_t)~a2);
2144 tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
2147 case INDEX_op_eqv_i32:
2148 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2149 if (const_args[2]) {
2150 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2151 tcg_out_insn(s, RIL, XILF, a0, ~a2);
2153 tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
2156 case INDEX_op_nand_i32:
2157 tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
2159 case INDEX_op_nor_i32:
2160 tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
2163 case INDEX_op_neg_i32:
2164 tcg_out_insn(s, RR, LCR, args[0], args[1]);
2166 case INDEX_op_not_i32:
2167 tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
2170 case INDEX_op_mul_i32:
2171 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2172 if (const_args[2]) {
2173 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2174 if (a2 == (int16_t)a2) {
2175 tcg_out_insn(s, RI, MHI, a0, a2);
2177 tcg_out_insn(s, RIL, MSFI, a0, a2);
2179 } else if (a0 == a1) {
2180 tcg_out_insn(s, RRE, MSR, a0, a2);
2182 tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
2186 case INDEX_op_div2_i32:
2187 tcg_debug_assert(args[0] == args[2]);
2188 tcg_debug_assert(args[1] == args[3]);
2189 tcg_debug_assert((args[1] & 1) == 0);
2190 tcg_debug_assert(args[0] == args[1] + 1);
2191 tcg_out_insn(s, RR, DR, args[1], args[4]);
2193 case INDEX_op_divu2_i32:
2194 tcg_debug_assert(args[0] == args[2]);
2195 tcg_debug_assert(args[1] == args[3]);
2196 tcg_debug_assert((args[1] & 1) == 0);
2197 tcg_debug_assert(args[0] == args[1] + 1);
2198 tcg_out_insn(s, RRE, DLR, args[1], args[4]);
2201 case INDEX_op_shl_i32:
2205 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2207 if (const_args[2]) {
2208 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
2210 tcg_out_sh32(s, op, a0, a2, 0);
2213 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
2214 if (const_args[2]) {
2215 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
2217 tcg_out_sh64(s, op2, a0, a1, a2, 0);
2221 case INDEX_op_shr_i32:
2225 case INDEX_op_sar_i32:
2230 case INDEX_op_rotl_i32:
2231 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
2232 if (const_args[2]) {
2233 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
2235 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
2238 case INDEX_op_rotr_i32:
2239 if (const_args[2]) {
2240 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
2241 TCG_REG_NONE, (32 - args[2]) & 31);
2243 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2244 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
2248 case INDEX_op_bswap16_i32:
2249 a0 = args[0], a1 = args[1], a2 = args[2];
2250 tcg_out_insn(s, RRE, LRVR, a0, a1);
2251 if (a2 & TCG_BSWAP_OS) {
2252 tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
2254 tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
2257 case INDEX_op_bswap16_i64:
2258 a0 = args[0], a1 = args[1], a2 = args[2];
2259 tcg_out_insn(s, RRE, LRVGR, a0, a1);
2260 if (a2 & TCG_BSWAP_OS) {
2261 tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
2263 tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
2267 case INDEX_op_bswap32_i32:
2268 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2270 case INDEX_op_bswap32_i64:
2271 a0 = args[0], a1 = args[1], a2 = args[2];
2272 tcg_out_insn(s, RRE, LRVR, a0, a1);
2273 if (a2 & TCG_BSWAP_OS) {
2274 tcg_out_ext32s(s, a0, a0);
2275 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2276 tcg_out_ext32u(s, a0, a0);
2280 case INDEX_op_add2_i32:
2281 if (const_args[4]) {
2282 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2284 tcg_out_insn(s, RR, ALR, args[0], args[4]);
2286 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2288 case INDEX_op_sub2_i32:
2289 if (const_args[4]) {
2290 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2292 tcg_out_insn(s, RR, SLR, args[0], args[4]);
2294 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2298 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2301 case INDEX_op_brcond_i32:
2302 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2303 args[1], const_args[1], arg_label(args[3]));
2305 case INDEX_op_setcond_i32:
2306 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2307 args[2], const_args[2], false);
2309 case INDEX_op_negsetcond_i32:
2310 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2311 args[2], const_args[2], true);
2313 case INDEX_op_movcond_i32:
2314 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2315 args[2], const_args[2], args[3], const_args[3], args[4]);
2318 case INDEX_op_qemu_ld_a32_i32:
2319 case INDEX_op_qemu_ld_a64_i32:
2320 tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
2322 case INDEX_op_qemu_ld_a32_i64:
2323 case INDEX_op_qemu_ld_a64_i64:
2324 tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
2326 case INDEX_op_qemu_st_a32_i32:
2327 case INDEX_op_qemu_st_a64_i32:
2328 tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
2330 case INDEX_op_qemu_st_a32_i64:
2331 case INDEX_op_qemu_st_a64_i64:
2332 tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
2334 case INDEX_op_qemu_ld_a32_i128:
2335 case INDEX_op_qemu_ld_a64_i128:
2336 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
2338 case INDEX_op_qemu_st_a32_i128:
2339 case INDEX_op_qemu_st_a64_i128:
2340 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
2343 case INDEX_op_ld16s_i64:
2344 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2346 case INDEX_op_ld32u_i64:
2347 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2349 case INDEX_op_ld32s_i64:
2350 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2352 case INDEX_op_ld_i64:
2353 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2356 case INDEX_op_st32_i64:
2357 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2359 case INDEX_op_st_i64:
2360 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2363 case INDEX_op_add_i64:
2364 a0 = args[0], a1 = args[1], a2 = args[2];
2365 if (const_args[2]) {
2368 if (a2 == (int16_t)a2) {
2369 tcg_out_insn(s, RI, AGHI, a0, a2);
2372 if (a2 == (int32_t)a2) {
2373 tcg_out_insn(s, RIL, AGFI, a0, a2);
2376 if (a2 == (uint32_t)a2) {
2377 tcg_out_insn(s, RIL, ALGFI, a0, a2);
2380 if (-a2 == (uint32_t)-a2) {
2381 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2385 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2386 } else if (a0 == a1) {
2387 tcg_out_insn(s, RRE, AGR, a0, a2);
2389 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2392 case INDEX_op_sub_i64:
2393 a0 = args[0], a1 = args[1], a2 = args[2];
2394 if (const_args[2]) {
2398 tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
2402 case INDEX_op_and_i64:
2403 a0 = args[0], a1 = args[1], a2 = args[2];
2404 if (const_args[2]) {
2405 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2406 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2408 tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
2411 case INDEX_op_or_i64:
2412 a0 = args[0], a1 = args[1], a2 = args[2];
2413 if (const_args[2]) {
2414 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2415 tgen_ori(s, a0, a2);
2417 tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
2420 case INDEX_op_xor_i64:
2421 a0 = args[0], a1 = args[1], a2 = args[2];
2422 if (const_args[2]) {
2423 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2424 tgen_xori(s, a0, a2);
2426 tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
2430 case INDEX_op_andc_i64:
2431 a0 = args[0], a1 = args[1], a2 = args[2];
2432 if (const_args[2]) {
2433 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2434 tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
2436 tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
2439 case INDEX_op_orc_i64:
2440 a0 = args[0], a1 = args[1], a2 = args[2];
2441 if (const_args[2]) {
2442 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2443 tgen_ori(s, a0, ~a2);
2445 tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
2448 case INDEX_op_eqv_i64:
2449 a0 = args[0], a1 = args[1], a2 = args[2];
2450 if (const_args[2]) {
2451 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2452 tgen_xori(s, a0, ~a2);
2454 tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
2457 case INDEX_op_nand_i64:
2458 tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
2460 case INDEX_op_nor_i64:
2461 tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
2464 case INDEX_op_neg_i64:
2465 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2467 case INDEX_op_not_i64:
2468 tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
2470 case INDEX_op_bswap64_i64:
2471 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2474 case INDEX_op_mul_i64:
2475 a0 = args[0], a1 = args[1], a2 = args[2];
2476 if (const_args[2]) {
2477 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2478 if (a2 == (int16_t)a2) {
2479 tcg_out_insn(s, RI, MGHI, a0, a2);
2481 tcg_out_insn(s, RIL, MSGFI, a0, a2);
2483 } else if (a0 == a1) {
2484 tcg_out_insn(s, RRE, MSGR, a0, a2);
2486 tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
2490 case INDEX_op_div2_i64:
2492 * ??? We get an unnecessary sign-extension of the dividend
2493 * into op0 with this definition, but as we do in fact always
2494 * produce both quotient and remainder using INDEX_op_div_i64
2495 * instead requires jumping through even more hoops.
2497 tcg_debug_assert(args[0] == args[2]);
2498 tcg_debug_assert(args[1] == args[3]);
2499 tcg_debug_assert((args[1] & 1) == 0);
2500 tcg_debug_assert(args[0] == args[1] + 1);
2501 tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
2503 case INDEX_op_divu2_i64:
2504 tcg_debug_assert(args[0] == args[2]);
2505 tcg_debug_assert(args[1] == args[3]);
2506 tcg_debug_assert((args[1] & 1) == 0);
2507 tcg_debug_assert(args[0] == args[1] + 1);
2508 tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
2510 case INDEX_op_mulu2_i64:
2511 tcg_debug_assert(args[0] == args[2]);
2512 tcg_debug_assert((args[1] & 1) == 0);
2513 tcg_debug_assert(args[0] == args[1] + 1);
2514 tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
2516 case INDEX_op_muls2_i64:
2517 tcg_debug_assert((args[1] & 1) == 0);
2518 tcg_debug_assert(args[0] == args[1] + 1);
2519 tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
2522 case INDEX_op_shl_i64:
2525 if (const_args[2]) {
2526 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2528 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2531 case INDEX_op_shr_i64:
2534 case INDEX_op_sar_i64:
2538 case INDEX_op_rotl_i64:
2539 if (const_args[2]) {
2540 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2541 TCG_REG_NONE, args[2]);
2543 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2546 case INDEX_op_rotr_i64:
2547 if (const_args[2]) {
2548 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2549 TCG_REG_NONE, (64 - args[2]) & 63);
2551 /* We can use the smaller 32-bit negate because only the
2552 low 6 bits are examined for the rotate. */
2553 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2554 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2558 case INDEX_op_add2_i64:
2559 if (const_args[4]) {
2560 if ((int64_t)args[4] >= 0) {
2561 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2563 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2566 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2568 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2570 case INDEX_op_sub2_i64:
2571 if (const_args[4]) {
2572 if ((int64_t)args[4] >= 0) {
2573 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2575 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2578 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2580 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2], false);
        break;
    case INDEX_op_negsetcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2], true);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3], args[4]);
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            tgen_deposit(s, a0, a2, args[3], args[4], 1);
        } else {
            /* Since we can't support "0Z" as a constraint, we allow a1 in
               any register.  Fix things up as if a matching constraint.  */
            if (a0 != a1) {
                TCGType type = (opc == INDEX_op_deposit_i64);
                if (a0 == a2) {
                    tcg_out_mov(s, type, TCG_TMP0, a2);
                    a2 = TCG_TMP0;
                }
                tcg_out_mov(s, type, a0, a1);
            }
            tgen_deposit(s, a0, a2, args[3], args[4], 0);
        }
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
        tgen_extract(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_clz_i64:
        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
        break;

    case INDEX_op_ctpop_i32:
        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ctpop_i64:
        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    case INDEX_op_mb:
        /* The host memory model is quite strong; we simply need to
           serialize the instruction stream.  */
        if (args[0] & TCG_MO_ST_LD) {
            /* fast-bcr-serialization facility (45) is present */
            tcg_out_insn(s, RR, BCR, 14, 0);
        }
        break;
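    /*
     * bcr 14,0 is the fast-BCR-serialization form of the classic
     * bcr 15,0 checkpoint synchronization.  Facility 45 guarantees it,
     * and that facility is part of the baseline enforced by
     * query_s390_facilities(), so no fallback needs to be emitted here.
     */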

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    if (is_general_reg(src)) {
        /* Replicate general register into two MO_64.  */
        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
        if (vece == MO_64) {
            return true;
        }
        src = dst;
    }

    /*
     * Recall that the "standard" integer, within a vector, is the
     * rightmost element of the leftmost doubleword, a-la VLLEZ.
     */
    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
    return true;
}
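/*
 * Worked example: for vece == MO_8 the VREP index is (8 >> 0) - 1 = 7,
 * i.e. the low byte of the doubleword that VLVGP just stored, so the
 * general register's low byte is broadcast to all sixteen byte lanes.
 */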

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset)
{
    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t val)
{
    int i, mask, msb, lsb;

    /* Look for int16_t elements.  */
    if (vece <= MO_16 ||
        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
        return;
    }

    /* Look for bit masks.  */
    if (vece == MO_32) {
        if (risbg_mask((int32_t)val)) {
            /* Handle wraparound by swapping msb and lsb.  */
            if ((val & 0x80000001u) == 0x80000001u) {
                msb = 32 - ctz32(~val);
                lsb = clz32(~val) - 1;
            } else {
                msb = clz32(val);
                lsb = 31 - ctz32(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
            return;
        }
    } else {
        if (risbg_mask(val)) {
            /* Handle wraparound by swapping msb and lsb.  */
            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
                msb = 64 - ctz64(~val);
                lsb = clz64(~val) - 1;
            } else {
                msb = clz64(val);
                lsb = 63 - ctz64(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
            return;
        }
    }

    /* Look for all bytes 0x00 or 0xff.  */
    for (i = mask = 0; i < 8; i++) {
        uint8_t byte = val >> (i * 8);
        if (byte == 0xff) {
            mask |= 1 << i;
        } else if (byte != 0) {
            break;
        }
    }
    if (i == 8) {
        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
        return;
    }

    /* Otherwise, stuff it in the constant pool.  */
    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
}
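/*
 * Worked example for the VGM path: val = 0x00ffff00 (MO_32) is a
 * contiguous mask, so msb = clz32(val) = 8 and lsb = 31 - ctz32(val) = 23,
 * and VGM generates ones in bit positions 8..23 (MSB-0 numbering) of every
 * element.  A wraparound mask like 0xf000000f instead measures the hole of
 * zeros via ~val, yielding msb = 28 and lsb = 3, which VGM interprets as
 * bits 28..31 plus 0..3.
 */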

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;

    case INDEX_op_abs_vec:
        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
        break;
    case INDEX_op_neg_vec:
        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
        break;
    case INDEX_op_not_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
        break;

    case INDEX_op_add_vec:
        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
        break;
    case INDEX_op_sub_vec:
        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
        break;
    case INDEX_op_and_vec:
        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
        break;
    case INDEX_op_andc_vec:
        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
        break;
    case INDEX_op_mul_vec:
        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
        break;
    case INDEX_op_or_vec:
        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
        break;
    case INDEX_op_orc_vec:
        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
        break;
    case INDEX_op_xor_vec:
        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
        break;
    case INDEX_op_nand_vec:
        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
        break;
    case INDEX_op_nor_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
        break;
    case INDEX_op_eqv_vec:
        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
        break;

    case INDEX_op_shli_vec:
        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shri_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_sari_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_rotli_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shls_vec:
        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shrs_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_sars_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_rotls_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shlv_vec:
        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
        break;
    case INDEX_op_shrv_vec:
        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
        break;
    case INDEX_op_sarv_vec:
        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
        break;
    case INDEX_op_rotlv_vec:
        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
        break;

    case INDEX_op_smin_vec:
        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
        break;
    case INDEX_op_smax_vec:
        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
        break;
    case INDEX_op_umin_vec:
        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
        break;
    case INDEX_op_umax_vec:
        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
        break;

    case INDEX_op_bitsel_vec:
        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
        break;

    case INDEX_op_cmp_vec:
        switch ((TCGCond)args[3]) {
        case TCG_COND_EQ:
            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
            break;
        case TCG_COND_GT:
            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
            break;
        case TCG_COND_GTU:
            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_s390_vuph_vec:
        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
        break;
    case INDEX_op_s390_vupl_vec:
        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
        break;
    case INDEX_op_s390_vpks_vec:
        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
        break;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_abs_vec:
    case INDEX_op_add_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_bitsel_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotls_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_xor_vec:
        return 1;
    case INDEX_op_cmp_vec:
    case INDEX_op_cmpsel_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    case INDEX_op_mul_vec:
        return vece < MO_64;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
        return vece < MO_64 ? -1 : 0;
    default:
        return 0;
    }
}
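/*
 * The return values follow the generic TCG contract: 1 means the op is
 * emitted directly by tcg_out_vec_op(), -1 means it is supported but must
 * first be rewritten by tcg_expand_vec_op(), and 0 means unsupported, in
 * which case the middle-end falls back to scalar code.
 */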

static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    return need_inv;
}

static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
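/*
 * Example of the expansion: the hardware compares only EQ, GT and GTU
 * (VCEQ, VCH, VCHL), so NE is emitted as EQ plus the tcg_gen_not_vec()
 * above, and LT becomes GT with the operands swapped.
 */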

static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
                              TCGv_vec c1, TCGv_vec c2,
                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
{
    TCGv_vec t = tcg_temp_new_vec(type);

    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
        /* Invert the sense of the compare by swapping arguments.  */
        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
    } else {
        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
    }
    tcg_temp_free_vec(t);
}

static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
{
    TCGv_vec h1 = tcg_temp_new_vec(type);
    TCGv_vec h2 = tcg_temp_new_vec(type);
    TCGv_vec l1 = tcg_temp_new_vec(type);
    TCGv_vec l2 = tcg_temp_new_vec(type);

    tcg_debug_assert(vece < MO_64);

    /* Unpack with sign-extension.  */
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l2), tcgv_vec_arg(v2));

    /* Arithmetic on a wider element size.  */
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
              tcgv_vec_arg(l1), tcgv_vec_arg(l2));

    /* Pack with saturation.  */
    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));

    tcg_temp_free_vec(h1);
    tcg_temp_free_vec(h2);
    tcg_temp_free_vec(l1);
    tcg_temp_free_vec(l2);
}
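/*
 * Worked example for vece == MO_8 signed saturating add: VUPH/VUPL widen
 * the sixteen byte lanes into two vectors of 16-bit lanes, the add at
 * vece + 1 cannot overflow there, and VPKS packs the results back to
 * bytes while clamping each lane to [-128, 127].
 */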

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, v3, v4, t0;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));

    switch (opc) {
    case INDEX_op_cmp_vec:
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;

    case INDEX_op_cmpsel_vec:
        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
        break;

    case INDEX_op_rotrv_vec:
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;

    case INDEX_op_ssadd_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
        break;
    case INDEX_op_sssub_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
        break;

    default:
        g_assert_not_reached();
    }
    va_end(va);
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_negsetcond_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rA);

    case INDEX_op_clz_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_and_i64:
        return C_O1_I2(r, r, rNKR);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rK);

    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_andc_i64:
        return C_O1_I2(r, r, rKR);
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rNK);

    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_mul_i32:
        return (HAVE_FACILITY(MISC_INSN_EXT2)
                ? C_O1_I2(r, r, ri)
                : C_O1_I2(r, 0, ri));
    case INDEX_op_mul_i64:
        return (HAVE_FACILITY(MISC_INSN_EXT2)
                ? C_O1_I2(r, r, rJ)
                : C_O1_I2(r, 0, rJ));

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, ri);
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, rA);

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        return C_O2_I1(o, m, r);
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return C_O0_I3(o, m, r);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, rZ, r);

    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, ri, rI, r);
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, rA, rI, r);

    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        return C_O2_I3(o, m, 0, 1, r);

    case INDEX_op_mulu2_i64:
        return C_O2_I2(o, m, 0, r);
    case INDEX_op_muls2_i64:
        return C_O2_I2(o, m, r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return C_N1_O1_I4(r, r, 0, 1, ri, r);

    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return C_N1_O1_I4(r, r, 0, 1, rA, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(v, vr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_s390_vuph_vec:
    case INDEX_op_s390_vupl_vec:
        return C_O1_I1(v, v);
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_s390_vpks_vec:
        return C_O1_I2(v, v, v);
    case INDEX_op_rotls_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return C_O1_I2(v, v, r);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}
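/*
 * A note on the constraint letters used above: 'r' is any general
 * register, 'v' any vector register, and a digit ties an operand to the
 * same register as the numbered output (so C_O1_I2(r, 0, ri) is a
 * two-address form).  The remaining letters are defined by this backend
 * in tcg-target-con-str.h; the capital ones correspond to the
 * TCG_CT_CONST_* immediate classes declared at the top of this file.
 */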

/*
 * Mainline glibc added HWCAP_S390_VX before it was kernel abi.
 * Some distros have fixed this up locally, others have not.
 */
#ifndef HWCAP_S390_VXRS
#define HWCAP_S390_VXRS 2048
#endif

static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    const char *which;

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
        register void *r1 __asm__("1") = s390_facilities;

        /* stfle 0(%r1) */
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
    }
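    /*
     * STFLE is encoded by hand (.word 0xb2b0,0x1000) so the file still
     * builds with assemblers that predate the insn.  Per the ISA, r0
     * holds the number of doublewords provided minus one on input, and
     * the facility bits are stored at the second-operand address 0(%r1);
     * the register asm bindings above pin the operands to %r0 and %r1.
     */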

    /*
     * Use of vector registers requires os support beyond the facility bit.
     * If the kernel does not advertise support, disable the facility bits.
     * There is nothing else we currently care about in the 3rd word, so
     * disable VECTOR with one store.
     */
    if (!(hwcap & HWCAP_S390_VXRS)) {
        s390_facilities[2] = 0;
    }

    /*
     * Minimum supported cpu revision is z196.
     * Check for all required facilities.
     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
     */
    if (!HAVE_FACILITY(LONG_DISP)) {
        which = "long-displacement";
        goto fail;
    }
    if (!HAVE_FACILITY(EXT_IMM)) {
        which = "extended-immediate";
        goto fail;
    }
    if (!HAVE_FACILITY(GEN_INST_EXT)) {
        which = "general-instructions-extension";
        goto fail;
    }
    /*
     * Facility 45 is a big bin that contains: distinct-operands,
     * fast-BCR-serialization, high-word, population-count,
     * interlocked-access-1, and load/store-on-condition-1
     */
    if (!HAVE_FACILITY(45)) {
        which = "45";
        goto fail;
    }
    return;

 fail:
    error_report("%s: missing required facility %s", __func__, which);
    exit(-1);
}

static void tcg_target_init(TCGContext *s)
{
    query_s390_facilities();

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
    if (HAVE_FACILITY(VECTOR)) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we'd best avoid it for now.  */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
}

#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
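/*
 * Illustrative arithmetic only, using values these macros commonly have
 * on s390x (they are defined elsewhere, so treat the numbers as an
 * example): with TCG_TARGET_CALL_STACK_OFFSET = 160,
 * TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128,
 * FRAME_SIZE = 160 + 128 + 128 * 8 = 1312.
 */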

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
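    /*
     * A guest_base below 0x80000 fits in the signed 20-bit displacement
     * of the long-displacement memory insns, so it can be folded into
     * ordinary address arithmetic; only larger bases need a dedicated,
     * reserved register.
     */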
#ifndef CONFIG_SOFTMMU
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr.  Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0x07, count * sizeof(tcg_insn_unit));
}
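/*
 * Filling with 0x07 bytes yields the 2-byte pattern 0x0707, which decodes
 * as "nopr %r7" (bcr 0,%r7): a branch-on-condition with an all-zero mask
 * never branches, making it the conventional s390x no-op.
 */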

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
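/*
 * A two-byte uleb128 stores 7 bits per byte, low bits first, with the
 * high bit of each byte flagging a continuation; e.g. 0x230 encodes as
 * 0xb0 0x04.  That is why the initializer below emits
 * (FRAME_SIZE & 0x7f) | 0x80 followed by FRAME_SIZE >> 7, and why
 * FRAME_SIZE must stay below 1 << 14.
 */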

#define ELF_HOST_MACHINE  EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                  /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}