2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 /* We only support generating code for 64-bit mode. */
28 #if TCG_TARGET_REG_BITS != 64
29 #error "unsupported code generation mode"
32 #include "tcg-pool.inc.c"
35 /* ??? The translation blocks produced by TCG are generally small enough to
36 be entirely reachable with a 16-bit displacement. Leaving the option for
37 a 32-bit displacement here Just In Case. */
38 #define USE_LONG_BRANCHES 0
40 #define TCG_CT_CONST_S16 0x100
41 #define TCG_CT_CONST_S32 0x200
42 #define TCG_CT_CONST_S33 0x400
43 #define TCG_CT_CONST_ZERO 0x800
45 /* Several places within the instruction set 0 means "no register"
46 rather than TCG_REG_R0. */
47 #define TCG_REG_NONE 0
49 /* A scratch register that may be be used throughout the backend. */
50 #define TCG_TMP0 TCG_REG_R1
52 /* A scratch register that holds a pointer to the beginning of the TB.
53 We don't need this when we have pc-relative loads with the general
54 instructions extension facility. */
55 #define TCG_REG_TB TCG_REG_R12
56 #define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))
58 #ifndef CONFIG_SOFTMMU
59 #define TCG_GUEST_BASE_REG TCG_REG_R13
62 /* All of the following instructions are prefixed with their instruction
63 format, and are defined as 8- or 16-bit quantities, even when the two
64 halves of the 16-bit quantity may appear 32 bits apart in the insn.
65 This makes it easy to copy the values from the tables in Appendix B. */
66 typedef enum S390Opcode
{
#ifdef CONFIG_DEBUG_TCG
/* Register names for debug dumps, indexed by TCGReg (0..15).
 * BUG FIX: the upper eight entries lacked separating commas, so
 * adjacent string literals concatenated ("%r10" "%r11" -> "%r10%r11"),
 * leaving the array short and the names misaligned with registers.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
};
#endif
263 /* Since R6 is a potential argument register, choose it last of the
264 call-saved registers. Likewise prefer the call-clobbered registers
265 in reverse order to maximize the chance of avoiding the arguments. */
266 static const int tcg_target_reg_alloc_order
[] = {
267 /* Call saved registers. */
276 /* Call clobbered registers. */
280 /* Argument registers, in reverse order of allocation. */
287 static const int tcg_target_call_iarg_regs
[] = {
295 static const int tcg_target_call_oarg_regs
[] = {
303 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
304 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
305 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
306 #define S390_CC_NEVER 0
307 #define S390_CC_ALWAYS 15
309 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
310 static const uint8_t tcg_cond_to_s390_cond
[] = {
311 [TCG_COND_EQ
] = S390_CC_EQ
,
312 [TCG_COND_NE
] = S390_CC_NE
,
313 [TCG_COND_LT
] = S390_CC_LT
,
314 [TCG_COND_LE
] = S390_CC_LE
,
315 [TCG_COND_GT
] = S390_CC_GT
,
316 [TCG_COND_GE
] = S390_CC_GE
,
317 [TCG_COND_LTU
] = S390_CC_LT
,
318 [TCG_COND_LEU
] = S390_CC_LE
,
319 [TCG_COND_GTU
] = S390_CC_GT
,
320 [TCG_COND_GEU
] = S390_CC_GE
,
323 /* Condition codes that result from a LOAD AND TEST. Here, we have no
324 unsigned instruction variation, however since the test is vs zero we
325 can re-map the outcomes appropriately. */
326 static const uint8_t tcg_cond_to_ltr_cond
[] = {
327 [TCG_COND_EQ
] = S390_CC_EQ
,
328 [TCG_COND_NE
] = S390_CC_NE
,
329 [TCG_COND_LT
] = S390_CC_LT
,
330 [TCG_COND_LE
] = S390_CC_LE
,
331 [TCG_COND_GT
] = S390_CC_GT
,
332 [TCG_COND_GE
] = S390_CC_GE
,
333 [TCG_COND_LTU
] = S390_CC_NEVER
,
334 [TCG_COND_LEU
] = S390_CC_EQ
,
335 [TCG_COND_GTU
] = S390_CC_NE
,
336 [TCG_COND_GEU
] = S390_CC_ALWAYS
,
339 #ifdef CONFIG_SOFTMMU
340 static void * const qemu_ld_helpers
[16] = {
341 [MO_UB
] = helper_ret_ldub_mmu
,
342 [MO_SB
] = helper_ret_ldsb_mmu
,
343 [MO_LEUW
] = helper_le_lduw_mmu
,
344 [MO_LESW
] = helper_le_ldsw_mmu
,
345 [MO_LEUL
] = helper_le_ldul_mmu
,
346 [MO_LESL
] = helper_le_ldsl_mmu
,
347 [MO_LEQ
] = helper_le_ldq_mmu
,
348 [MO_BEUW
] = helper_be_lduw_mmu
,
349 [MO_BESW
] = helper_be_ldsw_mmu
,
350 [MO_BEUL
] = helper_be_ldul_mmu
,
351 [MO_BESL
] = helper_be_ldsl_mmu
,
352 [MO_BEQ
] = helper_be_ldq_mmu
,
355 static void * const qemu_st_helpers
[16] = {
356 [MO_UB
] = helper_ret_stb_mmu
,
357 [MO_LEUW
] = helper_le_stw_mmu
,
358 [MO_LEUL
] = helper_le_stl_mmu
,
359 [MO_LEQ
] = helper_le_stq_mmu
,
360 [MO_BEUW
] = helper_be_stw_mmu
,
361 [MO_BEUL
] = helper_be_stl_mmu
,
362 [MO_BEQ
] = helper_be_stq_mmu
,
366 static tcg_insn_unit
*tb_ret_addr
;
367 uint64_t s390_facilities
;
369 static bool patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
370 intptr_t value
, intptr_t addend
)
376 pcrel2
= (tcg_insn_unit
*)value
- code_ptr
;
380 if (pcrel2
== (int16_t)pcrel2
) {
381 tcg_patch16(code_ptr
, pcrel2
);
386 if (pcrel2
== (int32_t)pcrel2
) {
387 tcg_patch32(code_ptr
, pcrel2
);
392 if (value
== sextract64(value
, 0, 20)) {
393 old
= *(uint32_t *)code_ptr
& 0xf00000ff;
394 old
|= ((value
& 0xfff) << 16) | ((value
& 0xff000) >> 4);
395 tcg_patch32(code_ptr
, old
);
400 g_assert_not_reached();
405 /* parse target specific constraints */
406 static const char *target_parse_constraint(TCGArgConstraint
*ct
,
407 const char *ct_str
, TCGType type
)
410 case 'r': /* all registers */
411 ct
->ct
|= TCG_CT_REG
;
414 case 'L': /* qemu_ld/st constraint */
415 ct
->ct
|= TCG_CT_REG
;
417 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R2
);
418 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R3
);
419 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R4
);
421 case 'a': /* force R2 for division */
422 ct
->ct
|= TCG_CT_REG
;
424 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R2
);
426 case 'b': /* force R3 for division */
427 ct
->ct
|= TCG_CT_REG
;
429 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R3
);
432 ct
->ct
|= TCG_CT_CONST_S33
;
435 ct
->ct
|= TCG_CT_CONST_S16
;
438 ct
->ct
|= TCG_CT_CONST_S32
;
441 ct
->ct
|= TCG_CT_CONST_ZERO
;
449 /* Test if a constant matches the constraint. */
450 static int tcg_target_const_match(tcg_target_long val
, TCGType type
,
451 const TCGArgConstraint
*arg_ct
)
455 if (ct
& TCG_CT_CONST
) {
459 if (type
== TCG_TYPE_I32
) {
463 /* The following are mutually exclusive. */
464 if (ct
& TCG_CT_CONST_S16
) {
465 return val
== (int16_t)val
;
466 } else if (ct
& TCG_CT_CONST_S32
) {
467 return val
== (int32_t)val
;
468 } else if (ct
& TCG_CT_CONST_S33
) {
469 return val
>= -0xffffffffll
&& val
<= 0xffffffffll
;
470 } else if (ct
& TCG_CT_CONST_ZERO
) {
477 /* Emit instructions according to the given instruction format. */
479 static void tcg_out_insn_RR(TCGContext
*s
, S390Opcode op
, TCGReg r1
, TCGReg r2
)
481 tcg_out16(s
, (op
<< 8) | (r1
<< 4) | r2
);
484 static void tcg_out_insn_RRE(TCGContext
*s
, S390Opcode op
,
485 TCGReg r1
, TCGReg r2
)
487 tcg_out32(s
, (op
<< 16) | (r1
<< 4) | r2
);
490 static void tcg_out_insn_RRF(TCGContext
*s
, S390Opcode op
,
491 TCGReg r1
, TCGReg r2
, int m3
)
493 tcg_out32(s
, (op
<< 16) | (m3
<< 12) | (r1
<< 4) | r2
);
496 static void tcg_out_insn_RI(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
498 tcg_out32(s
, (op
<< 16) | (r1
<< 20) | (i2
& 0xffff));
501 static void tcg_out_insn_RIE(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
504 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | m3
);
505 tcg_out32(s
, (i2
<< 16) | (op
& 0xff));
508 static void tcg_out_insn_RIL(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
510 tcg_out16(s
, op
| (r1
<< 4));
514 static void tcg_out_insn_RS(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
515 TCGReg b2
, TCGReg r3
, int disp
)
517 tcg_out32(s
, (op
<< 24) | (r1
<< 20) | (r3
<< 16) | (b2
<< 12)
521 static void tcg_out_insn_RSY(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
522 TCGReg b2
, TCGReg r3
, int disp
)
524 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | r3
);
525 tcg_out32(s
, (op
& 0xff) | (b2
<< 28)
526 | ((disp
& 0xfff) << 16) | ((disp
& 0xff000) >> 4));
529 #define tcg_out_insn_RX tcg_out_insn_RS
530 #define tcg_out_insn_RXY tcg_out_insn_RSY
532 /* Emit an opcode with "type-checking" of the format. */
533 #define tcg_out_insn(S, FMT, OP, ...) \
534 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
537 /* emit 64-bit shifts */
538 static void tcg_out_sh64(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
539 TCGReg src
, TCGReg sh_reg
, int sh_imm
)
541 tcg_out_insn_RSY(s
, op
, dest
, sh_reg
, src
, sh_imm
);
544 /* emit 32-bit shifts */
545 static void tcg_out_sh32(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
546 TCGReg sh_reg
, int sh_imm
)
548 tcg_out_insn_RS(s
, op
, dest
, sh_reg
, 0, sh_imm
);
551 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg dst
, TCGReg src
)
554 if (type
== TCG_TYPE_I32
) {
555 tcg_out_insn(s
, RR
, LR
, dst
, src
);
557 tcg_out_insn(s
, RRE
, LGR
, dst
, src
);
562 static const S390Opcode lli_insns
[4] = {
563 RI_LLILL
, RI_LLILH
, RI_LLIHL
, RI_LLIHH
566 static bool maybe_out_small_movi(TCGContext
*s
, TCGType type
,
567 TCGReg ret
, tcg_target_long sval
)
569 tcg_target_ulong uval
= sval
;
572 if (type
== TCG_TYPE_I32
) {
573 uval
= (uint32_t)sval
;
574 sval
= (int32_t)sval
;
577 /* Try all 32-bit insns that can load it in one go. */
578 if (sval
>= -0x8000 && sval
< 0x8000) {
579 tcg_out_insn(s
, RI
, LGHI
, ret
, sval
);
583 for (i
= 0; i
< 4; i
++) {
584 tcg_target_long mask
= 0xffffull
<< i
*16;
585 if ((uval
& mask
) == uval
) {
586 tcg_out_insn_RI(s
, lli_insns
[i
], ret
, uval
>> i
*16);
594 /* load a register with an immediate value */
595 static void tcg_out_movi_int(TCGContext
*s
, TCGType type
, TCGReg ret
,
596 tcg_target_long sval
, bool in_prologue
)
598 tcg_target_ulong uval
;
600 /* Try all 32-bit insns that can load it in one go. */
601 if (maybe_out_small_movi(s
, type
, ret
, sval
)) {
606 if (type
== TCG_TYPE_I32
) {
607 uval
= (uint32_t)sval
;
608 sval
= (int32_t)sval
;
611 /* Try all 48-bit insns that can load it in one go. */
612 if (s390_facilities
& FACILITY_EXT_IMM
) {
613 if (sval
== (int32_t)sval
) {
614 tcg_out_insn(s
, RIL
, LGFI
, ret
, sval
);
617 if (uval
<= 0xffffffff) {
618 tcg_out_insn(s
, RIL
, LLILF
, ret
, uval
);
621 if ((uval
& 0xffffffff) == 0) {
622 tcg_out_insn(s
, RIL
, LLIHF
, ret
, uval
>> 32);
627 /* Try for PC-relative address load. For odd addresses,
628 attempt to use an offset from the start of the TB. */
629 if ((sval
& 1) == 0) {
630 ptrdiff_t off
= tcg_pcrel_diff(s
, (void *)sval
) >> 1;
631 if (off
== (int32_t)off
) {
632 tcg_out_insn(s
, RIL
, LARL
, ret
, off
);
635 } else if (USE_REG_TB
&& !in_prologue
) {
636 ptrdiff_t off
= sval
- (uintptr_t)s
->code_gen_ptr
;
637 if (off
== sextract64(off
, 0, 20)) {
638 /* This is certain to be an address within TB, and therefore
639 OFF will be negative; don't try RX_LA. */
640 tcg_out_insn(s
, RXY
, LAY
, ret
, TCG_REG_TB
, TCG_REG_NONE
, off
);
645 /* A 32-bit unsigned value can be loaded in 2 insns. And given
646 that LLILL, LLIHL, LLILF above did not succeed, we know that
647 both insns are required. */
648 if (uval
<= 0xffffffff) {
649 tcg_out_insn(s
, RI
, LLILL
, ret
, uval
);
650 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
654 /* Otherwise, stuff it in the constant pool. */
655 if (s390_facilities
& FACILITY_GEN_INST_EXT
) {
656 tcg_out_insn(s
, RIL
, LGRL
, ret
, 0);
657 new_pool_label(s
, sval
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
658 } else if (USE_REG_TB
&& !in_prologue
) {
659 tcg_out_insn(s
, RXY
, LG
, ret
, TCG_REG_TB
, TCG_REG_NONE
, 0);
660 new_pool_label(s
, sval
, R_390_20
, s
->code_ptr
- 2,
661 -(intptr_t)s
->code_gen_ptr
);
663 TCGReg base
= ret
? ret
: TCG_TMP0
;
664 tcg_out_insn(s
, RIL
, LARL
, base
, 0);
665 new_pool_label(s
, sval
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
666 tcg_out_insn(s
, RXY
, LG
, ret
, base
, TCG_REG_NONE
, 0);
670 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
671 TCGReg ret
, tcg_target_long sval
)
673 tcg_out_movi_int(s
, type
, ret
, sval
, false);
676 /* Emit a load/store type instruction. Inputs are:
677 DATA: The register to be loaded or stored.
678 BASE+OFS: The effective address.
679 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
680 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
682 static void tcg_out_mem(TCGContext
*s
, S390Opcode opc_rx
, S390Opcode opc_rxy
,
683 TCGReg data
, TCGReg base
, TCGReg index
,
686 if (ofs
< -0x80000 || ofs
>= 0x80000) {
687 /* Combine the low 20 bits of the offset with the actual load insn;
688 the high 44 bits must come from an immediate load. */
689 tcg_target_long low
= ((ofs
& 0xfffff) ^ 0x80000) - 0x80000;
690 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, ofs
- low
);
693 /* If we were already given an index register, add it in. */
694 if (index
!= TCG_REG_NONE
) {
695 tcg_out_insn(s
, RRE
, AGR
, TCG_TMP0
, index
);
700 if (opc_rx
&& ofs
>= 0 && ofs
< 0x1000) {
701 tcg_out_insn_RX(s
, opc_rx
, data
, base
, index
, ofs
);
703 tcg_out_insn_RXY(s
, opc_rxy
, data
, base
, index
, ofs
);
708 /* load data without address translation or endianness conversion */
709 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg data
,
710 TCGReg base
, intptr_t ofs
)
712 if (type
== TCG_TYPE_I32
) {
713 tcg_out_mem(s
, RX_L
, RXY_LY
, data
, base
, TCG_REG_NONE
, ofs
);
715 tcg_out_mem(s
, 0, RXY_LG
, data
, base
, TCG_REG_NONE
, ofs
);
719 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg data
,
720 TCGReg base
, intptr_t ofs
)
722 if (type
== TCG_TYPE_I32
) {
723 tcg_out_mem(s
, RX_ST
, RXY_STY
, data
, base
, TCG_REG_NONE
, ofs
);
725 tcg_out_mem(s
, 0, RXY_STG
, data
, base
, TCG_REG_NONE
, ofs
);
729 static inline bool tcg_out_sti(TCGContext
*s
, TCGType type
, TCGArg val
,
730 TCGReg base
, intptr_t ofs
)
735 /* load data from an absolute host address */
736 static void tcg_out_ld_abs(TCGContext
*s
, TCGType type
, TCGReg dest
, void *abs
)
738 intptr_t addr
= (intptr_t)abs
;
740 if ((s390_facilities
& FACILITY_GEN_INST_EXT
) && !(addr
& 1)) {
741 ptrdiff_t disp
= tcg_pcrel_diff(s
, abs
) >> 1;
742 if (disp
== (int32_t)disp
) {
743 if (type
== TCG_TYPE_I32
) {
744 tcg_out_insn(s
, RIL
, LRL
, dest
, disp
);
746 tcg_out_insn(s
, RIL
, LGRL
, dest
, disp
);
752 ptrdiff_t disp
= abs
- (void *)s
->code_gen_ptr
;
753 if (disp
== sextract64(disp
, 0, 20)) {
754 tcg_out_ld(s
, type
, dest
, TCG_REG_TB
, disp
);
759 tcg_out_movi(s
, TCG_TYPE_PTR
, dest
, addr
& ~0xffff);
760 tcg_out_ld(s
, type
, dest
, dest
, addr
& 0xffff);
763 static inline void tcg_out_risbg(TCGContext
*s
, TCGReg dest
, TCGReg src
,
764 int msb
, int lsb
, int ofs
, int z
)
767 tcg_out16(s
, (RIE_RISBG
& 0xff00) | (dest
<< 4) | src
);
768 tcg_out16(s
, (msb
<< 8) | (z
<< 7) | lsb
);
769 tcg_out16(s
, (ofs
<< 8) | (RIE_RISBG
& 0xff));
772 static void tgen_ext8s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
774 if (s390_facilities
& FACILITY_EXT_IMM
) {
775 tcg_out_insn(s
, RRE
, LGBR
, dest
, src
);
779 if (type
== TCG_TYPE_I32
) {
781 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 24);
783 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 24);
785 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 24);
787 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 56);
788 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 56);
792 static void tgen_ext8u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
794 if (s390_facilities
& FACILITY_EXT_IMM
) {
795 tcg_out_insn(s
, RRE
, LLGCR
, dest
, src
);
800 tcg_out_movi(s
, type
, TCG_TMP0
, 0xff);
803 tcg_out_movi(s
, type
, dest
, 0xff);
805 if (type
== TCG_TYPE_I32
) {
806 tcg_out_insn(s
, RR
, NR
, dest
, src
);
808 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
812 static void tgen_ext16s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
814 if (s390_facilities
& FACILITY_EXT_IMM
) {
815 tcg_out_insn(s
, RRE
, LGHR
, dest
, src
);
819 if (type
== TCG_TYPE_I32
) {
821 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 16);
823 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 16);
825 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 16);
827 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 48);
828 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 48);
832 static void tgen_ext16u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
834 if (s390_facilities
& FACILITY_EXT_IMM
) {
835 tcg_out_insn(s
, RRE
, LLGHR
, dest
, src
);
840 tcg_out_movi(s
, type
, TCG_TMP0
, 0xffff);
843 tcg_out_movi(s
, type
, dest
, 0xffff);
845 if (type
== TCG_TYPE_I32
) {
846 tcg_out_insn(s
, RR
, NR
, dest
, src
);
848 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
852 static inline void tgen_ext32s(TCGContext
*s
, TCGReg dest
, TCGReg src
)
854 tcg_out_insn(s
, RRE
, LGFR
, dest
, src
);
857 static inline void tgen_ext32u(TCGContext
*s
, TCGReg dest
, TCGReg src
)
859 tcg_out_insn(s
, RRE
, LLGFR
, dest
, src
);
862 /* Accept bit patterns like these:
867 Copied from gcc sources. */
868 static inline bool risbg_mask(uint64_t c
)
871 /* We don't change the number of transitions by inverting,
872 so make sure we start with the LSB zero. */
876 /* Reject all zeros or all ones. */
880 /* Find the first transition. */
882 /* Invert to look for a second transition. */
884 /* Erase the first transition. */
886 /* Find the second transition, if any. */
888 /* Match if all the bits are 1's, or if c is zero. */
892 static void tgen_andi_risbg(TCGContext
*s
, TCGReg out
, TCGReg in
, uint64_t val
)
895 if ((val
& 0x8000000000000001ull
) == 0x8000000000000001ull
) {
896 /* Achieve wraparound by swapping msb and lsb. */
897 msb
= 64 - ctz64(~val
);
898 lsb
= clz64(~val
) - 1;
901 lsb
= 63 - ctz64(val
);
903 tcg_out_risbg(s
, out
, in
, msb
, lsb
, 0, 1);
906 static void tgen_andi(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
908 static const S390Opcode ni_insns
[4] = {
909 RI_NILL
, RI_NILH
, RI_NIHL
, RI_NIHH
911 static const S390Opcode nif_insns
[2] = {
914 uint64_t valid
= (type
== TCG_TYPE_I32
? 0xffffffffull
: -1ull);
917 /* Look for the zero-extensions. */
918 if ((val
& valid
) == 0xffffffff) {
919 tgen_ext32u(s
, dest
, dest
);
922 if (s390_facilities
& FACILITY_EXT_IMM
) {
923 if ((val
& valid
) == 0xff) {
924 tgen_ext8u(s
, TCG_TYPE_I64
, dest
, dest
);
927 if ((val
& valid
) == 0xffff) {
928 tgen_ext16u(s
, TCG_TYPE_I64
, dest
, dest
);
933 /* Try all 32-bit insns that can perform it in one go. */
934 for (i
= 0; i
< 4; i
++) {
935 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
936 if (((val
| ~valid
) & mask
) == mask
) {
937 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
942 /* Try all 48-bit insns that can perform it in one go. */
943 if (s390_facilities
& FACILITY_EXT_IMM
) {
944 for (i
= 0; i
< 2; i
++) {
945 tcg_target_ulong mask
= ~(0xffffffffull
<< i
*32);
946 if (((val
| ~valid
) & mask
) == mask
) {
947 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
952 if ((s390_facilities
& FACILITY_GEN_INST_EXT
) && risbg_mask(val
)) {
953 tgen_andi_risbg(s
, dest
, dest
, val
);
957 /* Use the constant pool if USE_REG_TB, but not for small constants. */
959 if (!maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
960 tcg_out_insn(s
, RXY
, NG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
961 new_pool_label(s
, val
& valid
, R_390_20
, s
->code_ptr
- 2,
962 -(intptr_t)s
->code_gen_ptr
);
966 tcg_out_movi(s
, type
, TCG_TMP0
, val
);
968 if (type
== TCG_TYPE_I32
) {
969 tcg_out_insn(s
, RR
, NR
, dest
, TCG_TMP0
);
971 tcg_out_insn(s
, RRE
, NGR
, dest
, TCG_TMP0
);
975 static void tgen_ori(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
977 static const S390Opcode oi_insns
[4] = {
978 RI_OILL
, RI_OILH
, RI_OIHL
, RI_OIHH
980 static const S390Opcode oif_insns
[2] = {
986 /* Look for no-op. */
987 if (unlikely(val
== 0)) {
991 /* Try all 32-bit insns that can perform it in one go. */
992 for (i
= 0; i
< 4; i
++) {
993 tcg_target_ulong mask
= (0xffffull
<< i
*16);
994 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
995 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1000 /* Try all 48-bit insns that can perform it in one go. */
1001 if (s390_facilities
& FACILITY_EXT_IMM
) {
1002 for (i
= 0; i
< 2; i
++) {
1003 tcg_target_ulong mask
= (0xffffffffull
<< i
*32);
1004 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1005 tcg_out_insn_RIL(s
, oif_insns
[i
], dest
, val
>> i
*32);
1011 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1012 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
1013 if (type
== TCG_TYPE_I32
) {
1014 tcg_out_insn(s
, RR
, OR
, dest
, TCG_TMP0
);
1016 tcg_out_insn(s
, RRE
, OGR
, dest
, TCG_TMP0
);
1018 } else if (USE_REG_TB
) {
1019 tcg_out_insn(s
, RXY
, OG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1020 new_pool_label(s
, val
, R_390_20
, s
->code_ptr
- 2,
1021 -(intptr_t)s
->code_gen_ptr
);
1023 /* Perform the OR via sequential modifications to the high and
1024 low parts. Do this via recursion to handle 16-bit vs 32-bit
1025 masks in each half. */
1026 tcg_debug_assert(s390_facilities
& FACILITY_EXT_IMM
);
1027 tgen_ori(s
, type
, dest
, val
& 0x00000000ffffffffull
);
1028 tgen_ori(s
, type
, dest
, val
& 0xffffffff00000000ull
);
1032 static void tgen_xori(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
1034 /* Try all 48-bit insns that can perform it in one go. */
1035 if (s390_facilities
& FACILITY_EXT_IMM
) {
1036 if ((val
& 0xffffffff00000000ull
) == 0) {
1037 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1040 if ((val
& 0x00000000ffffffffull
) == 0) {
1041 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 32);
1046 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1047 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
1048 if (type
== TCG_TYPE_I32
) {
1049 tcg_out_insn(s
, RR
, XR
, dest
, TCG_TMP0
);
1051 tcg_out_insn(s
, RRE
, XGR
, dest
, TCG_TMP0
);
1053 } else if (USE_REG_TB
) {
1054 tcg_out_insn(s
, RXY
, XG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1055 new_pool_label(s
, val
, R_390_20
, s
->code_ptr
- 2,
1056 -(intptr_t)s
->code_gen_ptr
);
1058 /* Perform the xor by parts. */
1059 tcg_debug_assert(s390_facilities
& FACILITY_EXT_IMM
);
1060 if (val
& 0xffffffff) {
1061 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1063 if (val
> 0xffffffff) {
1064 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 32);
1069 static int tgen_cmp(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg r1
,
1070 TCGArg c2
, bool c2const
, bool need_carry
)
1072 bool is_unsigned
= is_unsigned_cond(c
);
1077 if (!(is_unsigned
&& need_carry
)) {
1078 if (type
== TCG_TYPE_I32
) {
1079 tcg_out_insn(s
, RR
, LTR
, r1
, r1
);
1081 tcg_out_insn(s
, RRE
, LTGR
, r1
, r1
);
1083 return tcg_cond_to_ltr_cond
[c
];
1087 if (!is_unsigned
&& c2
== (int16_t)c2
) {
1088 op
= (type
== TCG_TYPE_I32
? RI_CHI
: RI_CGHI
);
1089 tcg_out_insn_RI(s
, op
, r1
, c2
);
1093 if (s390_facilities
& FACILITY_EXT_IMM
) {
1094 if (type
== TCG_TYPE_I32
) {
1095 op
= (is_unsigned
? RIL_CLFI
: RIL_CFI
);
1096 tcg_out_insn_RIL(s
, op
, r1
, c2
);
1098 } else if (c2
== (is_unsigned
? (uint32_t)c2
: (int32_t)c2
)) {
1099 op
= (is_unsigned
? RIL_CLGFI
: RIL_CGFI
);
1100 tcg_out_insn_RIL(s
, op
, r1
, c2
);
1105 /* Use the constant pool, but not for small constants. */
1106 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, c2
)) {
1108 /* fall through to reg-reg */
1109 } else if (USE_REG_TB
) {
1110 if (type
== TCG_TYPE_I32
) {
1111 op
= (is_unsigned
? RXY_CLY
: RXY_CY
);
1112 tcg_out_insn_RXY(s
, op
, r1
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1113 new_pool_label(s
, (uint32_t)c2
, R_390_20
, s
->code_ptr
- 2,
1114 4 - (intptr_t)s
->code_gen_ptr
);
1116 op
= (is_unsigned
? RXY_CLG
: RXY_CG
);
1117 tcg_out_insn_RXY(s
, op
, r1
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1118 new_pool_label(s
, c2
, R_390_20
, s
->code_ptr
- 2,
1119 -(intptr_t)s
->code_gen_ptr
);
1123 if (type
== TCG_TYPE_I32
) {
1124 op
= (is_unsigned
? RIL_CLRL
: RIL_CRL
);
1125 tcg_out_insn_RIL(s
, op
, r1
, 0);
1126 new_pool_label(s
, (uint32_t)c2
, R_390_PC32DBL
,
1127 s
->code_ptr
- 2, 2 + 4);
1129 op
= (is_unsigned
? RIL_CLGRL
: RIL_CGRL
);
1130 tcg_out_insn_RIL(s
, op
, r1
, 0);
1131 new_pool_label(s
, c2
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
1137 if (type
== TCG_TYPE_I32
) {
1138 op
= (is_unsigned
? RR_CLR
: RR_CR
);
1139 tcg_out_insn_RR(s
, op
, r1
, c2
);
1141 op
= (is_unsigned
? RRE_CLGR
: RRE_CGR
);
1142 tcg_out_insn_RRE(s
, op
, r1
, c2
);
1146 return tcg_cond_to_s390_cond
[c
];
1149 static void tgen_setcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
1150 TCGReg dest
, TCGReg c1
, TCGArg c2
, int c2const
)
1155 /* With LOC2, we can always emit the minimum 3 insns. */
1156 if (s390_facilities
& FACILITY_LOAD_ON_COND2
) {
1157 /* Emit: d = 0, d = (cc ? 1 : d). */
1158 cc
= tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, false);
1159 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, 0);
1160 tcg_out_insn(s
, RIE
, LOCGHI
, dest
, 1, cc
);
1164 have_loc
= (s390_facilities
& FACILITY_LOAD_ON_COND
) != 0;
1166 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
1170 /* X != 0 is X > 0. */
1171 if (c2const
&& c2
== 0) {
1172 cond
= TCG_COND_GTU
;
1180 /* The result of a compare has CC=2 for GT and CC=3 unused.
1181 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
1182 tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, true);
1183 tcg_out_movi(s
, type
, dest
, 0);
1184 tcg_out_insn(s
, RRE
, ALCGR
, dest
, dest
);
1188 /* X == 0 is X <= 0. */
1189 if (c2const
&& c2
== 0) {
1190 cond
= TCG_COND_LEU
;
1198 /* As above, but we're looking for borrow, or !carry.
1199 The second insn computes d - d - borrow, or -1 for true
1200 and 0 for false. So we must mask to 1 bit afterward. */
1201 tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, true);
1202 tcg_out_insn(s
, RRE
, SLBGR
, dest
, dest
);
1203 tgen_andi(s
, type
, dest
, 1);
1210 /* Swap operands so that we can use LEU/GTU/GT/LE. */
1215 tcg_out_movi(s
, type
, TCG_TMP0
, c2
);
1224 cond
= tcg_swap_cond(cond
);
1228 g_assert_not_reached();
1231 cc
= tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, false);
1233 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1234 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, 0);
1235 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_TMP0
, 1);
1236 tcg_out_insn(s
, RRF
, LOCGR
, dest
, TCG_TMP0
, cc
);
1238 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1239 tcg_out_movi(s
, type
, dest
, 1);
1240 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1241 tcg_out_movi(s
, type
, dest
, 0);
1245 static void tgen_movcond(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg dest
,
1246 TCGReg c1
, TCGArg c2
, int c2const
,
1247 TCGArg v3
, int v3const
)
1250 if (s390_facilities
& FACILITY_LOAD_ON_COND
) {
1251 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
, false);
1253 tcg_out_insn(s
, RIE
, LOCGHI
, dest
, v3
, cc
);
1255 tcg_out_insn(s
, RRF
, LOCGR
, dest
, v3
, cc
);
1258 c
= tcg_invert_cond(c
);
1259 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
, false);
1261 /* Emit: if (cc) goto over; dest = r3; over: */
1262 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1263 tcg_out_insn(s
, RRE
, LGR
, dest
, v3
);
1267 static void tgen_clz(TCGContext
*s
, TCGReg dest
, TCGReg a1
,
1268 TCGArg a2
, int a2const
)
1270 /* Since this sets both R and R+1, we have no choice but to store the
1271 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1272 QEMU_BUILD_BUG_ON(TCG_TMP0
!= TCG_REG_R1
);
1273 tcg_out_insn(s
, RRE
, FLOGR
, TCG_REG_R0
, a1
);
1275 if (a2const
&& a2
== 64) {
1276 tcg_out_mov(s
, TCG_TYPE_I64
, dest
, TCG_REG_R0
);
1279 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, a2
);
1281 tcg_out_mov(s
, TCG_TYPE_I64
, dest
, a2
);
1283 if (s390_facilities
& FACILITY_LOAD_ON_COND
) {
1284 /* Emit: if (one bit found) dest = r0. */
1285 tcg_out_insn(s
, RRF
, LOCGR
, dest
, TCG_REG_R0
, 2);
1287 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1288 tcg_out_insn(s
, RI
, BRC
, 8, (4 + 4) >> 1);
1289 tcg_out_insn(s
, RRE
, LGR
, dest
, TCG_REG_R0
);
1294 static void tgen_deposit(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1295 int ofs
, int len
, int z
)
1297 int lsb
= (63 - ofs
);
1298 int msb
= lsb
- (len
- 1);
1299 tcg_out_risbg(s
, dest
, src
, msb
, lsb
, ofs
, z
);
1302 static void tgen_extract(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1305 tcg_out_risbg(s
, dest
, src
, 64 - len
, 63, 64 - ofs
, 1);
1308 static void tgen_gotoi(TCGContext
*s
, int cc
, tcg_insn_unit
*dest
)
1310 ptrdiff_t off
= dest
- s
->code_ptr
;
1311 if (off
== (int16_t)off
) {
1312 tcg_out_insn(s
, RI
, BRC
, cc
, off
);
1313 } else if (off
== (int32_t)off
) {
1314 tcg_out_insn(s
, RIL
, BRCL
, cc
, off
);
1316 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, (uintptr_t)dest
);
1317 tcg_out_insn(s
, RR
, BCR
, cc
, TCG_TMP0
);
1321 static void tgen_branch(TCGContext
*s
, int cc
, TCGLabel
*l
)
1324 tgen_gotoi(s
, cc
, l
->u
.value_ptr
);
1325 } else if (USE_LONG_BRANCHES
) {
1326 tcg_out16(s
, RIL_BRCL
| (cc
<< 4));
1327 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC32DBL
, l
, 2);
1330 tcg_out16(s
, RI_BRC
| (cc
<< 4));
1331 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC16DBL
, l
, 2);
1336 static void tgen_compare_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1337 TCGReg r1
, TCGReg r2
, TCGLabel
*l
)
1342 off
= l
->u
.value_ptr
- s
->code_ptr
;
1343 tcg_debug_assert(off
== (int16_t)off
);
1345 tcg_out_reloc(s
, s
->code_ptr
+ 1, R_390_PC16DBL
, l
, 2);
1348 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | r2
);
1350 tcg_out16(s
, cc
<< 12 | (opc
& 0xff));
1353 static void tgen_compare_imm_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1354 TCGReg r1
, int i2
, TCGLabel
*l
)
1356 tcg_target_long off
= 0;
1359 off
= l
->u
.value_ptr
- s
->code_ptr
;
1360 tcg_debug_assert(off
== (int16_t)off
);
1362 tcg_out_reloc(s
, s
->code_ptr
+ 1, R_390_PC16DBL
, l
, 2);
1365 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | cc
);
1367 tcg_out16(s
, (i2
<< 8) | (opc
& 0xff));
1370 static void tgen_brcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1371 TCGReg r1
, TCGArg c2
, int c2const
, TCGLabel
*l
)
1375 if (s390_facilities
& FACILITY_GEN_INST_EXT
) {
1376 bool is_unsigned
= is_unsigned_cond(c
);
1380 cc
= tcg_cond_to_s390_cond
[c
];
1383 opc
= (type
== TCG_TYPE_I32
1384 ? (is_unsigned
? RIE_CLRJ
: RIE_CRJ
)
1385 : (is_unsigned
? RIE_CLGRJ
: RIE_CGRJ
));
1386 tgen_compare_branch(s
, opc
, cc
, r1
, c2
, l
);
1390 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1391 If the immediate we've been given does not fit that range, we'll
1392 fall back to separate compare and branch instructions using the
1393 larger comparison range afforded by COMPARE IMMEDIATE. */
1394 if (type
== TCG_TYPE_I32
) {
1397 in_range
= (uint32_t)c2
== (uint8_t)c2
;
1400 in_range
= (int32_t)c2
== (int8_t)c2
;
1405 in_range
= (uint64_t)c2
== (uint8_t)c2
;
1408 in_range
= (int64_t)c2
== (int8_t)c2
;
1412 tgen_compare_imm_branch(s
, opc
, cc
, r1
, c2
, l
);
1417 cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
, false);
1418 tgen_branch(s
, cc
, l
);
1421 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*dest
)
1423 ptrdiff_t off
= dest
- s
->code_ptr
;
1424 if (off
== (int32_t)off
) {
1425 tcg_out_insn(s
, RIL
, BRASL
, TCG_REG_R14
, off
);
1427 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, (uintptr_t)dest
);
1428 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, TCG_TMP0
);
1432 static void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGMemOp opc
, TCGReg data
,
1433 TCGReg base
, TCGReg index
, int disp
)
1435 switch (opc
& (MO_SSIZE
| MO_BSWAP
)) {
1437 tcg_out_insn(s
, RXY
, LLGC
, data
, base
, index
, disp
);
1440 tcg_out_insn(s
, RXY
, LGB
, data
, base
, index
, disp
);
1443 case MO_UW
| MO_BSWAP
:
1444 /* swapped unsigned halfword load with upper bits zeroed */
1445 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1446 tgen_ext16u(s
, TCG_TYPE_I64
, data
, data
);
1449 tcg_out_insn(s
, RXY
, LLGH
, data
, base
, index
, disp
);
1452 case MO_SW
| MO_BSWAP
:
1453 /* swapped sign-extended halfword load */
1454 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1455 tgen_ext16s(s
, TCG_TYPE_I64
, data
, data
);
1458 tcg_out_insn(s
, RXY
, LGH
, data
, base
, index
, disp
);
1461 case MO_UL
| MO_BSWAP
:
1462 /* swapped unsigned int load with upper bits zeroed */
1463 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1464 tgen_ext32u(s
, data
, data
);
1467 tcg_out_insn(s
, RXY
, LLGF
, data
, base
, index
, disp
);
1470 case MO_SL
| MO_BSWAP
:
1471 /* swapped sign-extended int load */
1472 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1473 tgen_ext32s(s
, data
, data
);
1476 tcg_out_insn(s
, RXY
, LGF
, data
, base
, index
, disp
);
1479 case MO_Q
| MO_BSWAP
:
1480 tcg_out_insn(s
, RXY
, LRVG
, data
, base
, index
, disp
);
1483 tcg_out_insn(s
, RXY
, LG
, data
, base
, index
, disp
);
1491 static void tcg_out_qemu_st_direct(TCGContext
*s
, TCGMemOp opc
, TCGReg data
,
1492 TCGReg base
, TCGReg index
, int disp
)
1494 switch (opc
& (MO_SIZE
| MO_BSWAP
)) {
1496 if (disp
>= 0 && disp
< 0x1000) {
1497 tcg_out_insn(s
, RX
, STC
, data
, base
, index
, disp
);
1499 tcg_out_insn(s
, RXY
, STCY
, data
, base
, index
, disp
);
1503 case MO_UW
| MO_BSWAP
:
1504 tcg_out_insn(s
, RXY
, STRVH
, data
, base
, index
, disp
);
1507 if (disp
>= 0 && disp
< 0x1000) {
1508 tcg_out_insn(s
, RX
, STH
, data
, base
, index
, disp
);
1510 tcg_out_insn(s
, RXY
, STHY
, data
, base
, index
, disp
);
1514 case MO_UL
| MO_BSWAP
:
1515 tcg_out_insn(s
, RXY
, STRV
, data
, base
, index
, disp
);
1518 if (disp
>= 0 && disp
< 0x1000) {
1519 tcg_out_insn(s
, RX
, ST
, data
, base
, index
, disp
);
1521 tcg_out_insn(s
, RXY
, STY
, data
, base
, index
, disp
);
1525 case MO_Q
| MO_BSWAP
:
1526 tcg_out_insn(s
, RXY
, STRVG
, data
, base
, index
, disp
);
1529 tcg_out_insn(s
, RXY
, STG
, data
, base
, index
, disp
);
1537 #if defined(CONFIG_SOFTMMU)
1538 #include "tcg-ldst.inc.c"
1540 /* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1541 Using the offset of the second entry in the last tlb table ensures
1542 that we can index all of the elements of the first entry. */
1543 QEMU_BUILD_BUG_ON(offsetof(CPUArchState
, tlb_table
[NB_MMU_MODES
- 1][1])
1546 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1547 addend into R2.  Returns a register with the sanitized guest address. */
1548 static TCGReg
tcg_out_tlb_read(TCGContext
* s
, TCGReg addr_reg
, TCGMemOp opc
,
1549 int mem_index
, bool is_ld
)
1551 unsigned s_bits
= opc
& MO_SIZE
;
1552 unsigned a_bits
= get_alignment_bits(opc
);
1553 unsigned s_mask
= (1 << s_bits
) - 1;
1554 unsigned a_mask
= (1 << a_bits
) - 1;
1558 /* For aligned accesses, we check the first byte and include the alignment
1559 bits within the address. For unaligned access, we check that we don't
1560 cross pages using the address of the last byte of the access. */
1561 a_off
= (a_bits
>= s_bits
? 0 : s_mask
- a_mask
);
1562 tlb_mask
= (uint64_t)TARGET_PAGE_MASK
| a_mask
;
1564 if (s390_facilities
& FACILITY_GEN_INST_EXT
) {
1565 tcg_out_risbg(s
, TCG_REG_R2
, addr_reg
,
1566 64 - CPU_TLB_BITS
- CPU_TLB_ENTRY_BITS
,
1567 63 - CPU_TLB_ENTRY_BITS
,
1568 64 + CPU_TLB_ENTRY_BITS
- TARGET_PAGE_BITS
, 1);
1570 tcg_out_insn(s
, RX
, LA
, TCG_REG_R3
, addr_reg
, TCG_REG_NONE
, a_off
);
1571 tgen_andi(s
, TCG_TYPE_TL
, TCG_REG_R3
, tlb_mask
);
1573 tgen_andi_risbg(s
, TCG_REG_R3
, addr_reg
, tlb_mask
);
1576 tcg_out_sh64(s
, RSY_SRLG
, TCG_REG_R2
, addr_reg
, TCG_REG_NONE
,
1577 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1578 tcg_out_insn(s
, RX
, LA
, TCG_REG_R3
, addr_reg
, TCG_REG_NONE
, a_off
);
1579 tgen_andi(s
, TCG_TYPE_I64
, TCG_REG_R2
,
1580 (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
1581 tgen_andi(s
, TCG_TYPE_TL
, TCG_REG_R3
, tlb_mask
);
1585 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
);
1587 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
);
1589 if (TARGET_LONG_BITS
== 32) {
1590 tcg_out_mem(s
, RX_C
, RXY_CY
, TCG_REG_R3
, TCG_REG_R2
, TCG_AREG0
, ofs
);
1592 tcg_out_mem(s
, 0, RXY_CG
, TCG_REG_R3
, TCG_REG_R2
, TCG_AREG0
, ofs
);
1595 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1596 tcg_out_mem(s
, 0, RXY_LG
, TCG_REG_R2
, TCG_REG_R2
, TCG_AREG0
, ofs
);
1598 if (TARGET_LONG_BITS
== 32) {
1599 tgen_ext32u(s
, TCG_REG_R3
, addr_reg
);
1605 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1606 TCGReg data
, TCGReg addr
,
1607 tcg_insn_unit
*raddr
, tcg_insn_unit
*label_ptr
)
1609 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1611 label
->is_ld
= is_ld
;
1613 label
->datalo_reg
= data
;
1614 label
->addrlo_reg
= addr
;
1615 label
->raddr
= raddr
;
1616 label
->label_ptr
[0] = label_ptr
;
1619 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1621 TCGReg addr_reg
= lb
->addrlo_reg
;
1622 TCGReg data_reg
= lb
->datalo_reg
;
1623 TCGMemOpIdx oi
= lb
->oi
;
1624 TCGMemOp opc
= get_memop(oi
);
1626 bool ok
= patch_reloc(lb
->label_ptr
[0], R_390_PC16DBL
,
1627 (intptr_t)s
->code_ptr
, 2);
1628 tcg_debug_assert(ok
);
1630 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_AREG0
);
1631 if (TARGET_LONG_BITS
== 64) {
1632 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R3
, addr_reg
);
1634 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R4
, oi
);
1635 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R5
, (uintptr_t)lb
->raddr
);
1636 tcg_out_call(s
, qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SSIZE
)]);
1637 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1639 tgen_gotoi(s
, S390_CC_ALWAYS
, lb
->raddr
);
1642 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1644 TCGReg addr_reg
= lb
->addrlo_reg
;
1645 TCGReg data_reg
= lb
->datalo_reg
;
1646 TCGMemOpIdx oi
= lb
->oi
;
1647 TCGMemOp opc
= get_memop(oi
);
1649 bool ok
= patch_reloc(lb
->label_ptr
[0], R_390_PC16DBL
,
1650 (intptr_t)s
->code_ptr
, 2);
1651 tcg_debug_assert(ok
);
1653 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_AREG0
);
1654 if (TARGET_LONG_BITS
== 64) {
1655 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R3
, addr_reg
);
1657 switch (opc
& MO_SIZE
) {
1659 tgen_ext8u(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1662 tgen_ext16u(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1665 tgen_ext32u(s
, TCG_REG_R4
, data_reg
);
1668 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1673 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R5
, oi
);
1674 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R6
, (uintptr_t)lb
->raddr
);
1675 tcg_out_call(s
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1677 tgen_gotoi(s
, S390_CC_ALWAYS
, lb
->raddr
);
1680 static void tcg_prepare_user_ldst(TCGContext
*s
, TCGReg
*addr_reg
,
1681 TCGReg
*index_reg
, tcg_target_long
*disp
)
1683 if (TARGET_LONG_BITS
== 32) {
1684 tgen_ext32u(s
, TCG_TMP0
, *addr_reg
);
1685 *addr_reg
= TCG_TMP0
;
1687 if (guest_base
< 0x80000) {
1688 *index_reg
= TCG_REG_NONE
;
1691 *index_reg
= TCG_GUEST_BASE_REG
;
1695 #endif /* CONFIG_SOFTMMU */
1697 static void tcg_out_qemu_ld(TCGContext
* s
, TCGReg data_reg
, TCGReg addr_reg
,
1700 TCGMemOp opc
= get_memop(oi
);
1701 #ifdef CONFIG_SOFTMMU
1702 unsigned mem_index
= get_mmuidx(oi
);
1703 tcg_insn_unit
*label_ptr
;
1706 base_reg
= tcg_out_tlb_read(s
, addr_reg
, opc
, mem_index
, 1);
1708 tcg_out16(s
, RI_BRC
| (S390_CC_NE
<< 4));
1709 label_ptr
= s
->code_ptr
;
1712 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, base_reg
, TCG_REG_R2
, 0);
1714 add_qemu_ldst_label(s
, 1, oi
, data_reg
, addr_reg
, s
->code_ptr
, label_ptr
);
1717 tcg_target_long disp
;
1719 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1720 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1724 static void tcg_out_qemu_st(TCGContext
* s
, TCGReg data_reg
, TCGReg addr_reg
,
1727 TCGMemOp opc
= get_memop(oi
);
1728 #ifdef CONFIG_SOFTMMU
1729 unsigned mem_index
= get_mmuidx(oi
);
1730 tcg_insn_unit
*label_ptr
;
1733 base_reg
= tcg_out_tlb_read(s
, addr_reg
, opc
, mem_index
, 0);
1735 tcg_out16(s
, RI_BRC
| (S390_CC_NE
<< 4));
1736 label_ptr
= s
->code_ptr
;
1739 tcg_out_qemu_st_direct(s
, opc
, data_reg
, base_reg
, TCG_REG_R2
, 0);
1741 add_qemu_ldst_label(s
, 0, oi
, data_reg
, addr_reg
, s
->code_ptr
, label_ptr
);
1744 tcg_target_long disp
;
1746 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1747 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1751 # define OP_32_64(x) \
1752 case glue(glue(INDEX_op_,x),_i32): \
1753 case glue(glue(INDEX_op_,x),_i64)
1755 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1756 const TCGArg
*args
, const int *const_args
)
1762 case INDEX_op_exit_tb
:
1763 /* Reuse the zeroing that exists for goto_ptr. */
1766 tgen_gotoi(s
, S390_CC_ALWAYS
, s
->code_gen_epilogue
);
1768 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, a0
);
1769 tgen_gotoi(s
, S390_CC_ALWAYS
, tb_ret_addr
);
1773 case INDEX_op_goto_tb
:
1775 if (s
->tb_jmp_insn_offset
) {
1776 /* branch displacement must be aligned for atomic patching;
1777 * see if we need to add extra nop before branch
1779 if (!QEMU_PTR_IS_ALIGNED(s
->code_ptr
+ 1, 4)) {
1782 tcg_debug_assert(!USE_REG_TB
);
1783 tcg_out16(s
, RIL_BRCL
| (S390_CC_ALWAYS
<< 4));
1784 s
->tb_jmp_insn_offset
[a0
] = tcg_current_code_size(s
);
1787 /* load address stored at s->tb_jmp_target_addr + a0 */
1788 tcg_out_ld_abs(s
, TCG_TYPE_PTR
, TCG_REG_TB
,
1789 s
->tb_jmp_target_addr
+ a0
);
1791 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_TB
);
1793 set_jmp_reset_offset(s
, a0
);
1795 /* For the unlinked path of goto_tb, we need to reset
1796 TCG_REG_TB to the beginning of this TB. */
1798 int ofs
= -tcg_current_code_size(s
);
1799 assert(ofs
== (int16_t)ofs
);
1800 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_TB
, ofs
);
1804 case INDEX_op_goto_ptr
:
1807 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_TB
, a0
);
1809 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, a0
);
1813 /* ??? LLC (RXY format) is only present with the extended-immediate
1814 facility, whereas LLGC is always present. */
1815 tcg_out_mem(s
, 0, RXY_LLGC
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1819 /* ??? LB is no smaller than LGB, so no point to using it. */
1820 tcg_out_mem(s
, 0, RXY_LGB
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1824 /* ??? LLH (RXY format) is only present with the extended-immediate
1825 facility, whereas LLGH is always present. */
1826 tcg_out_mem(s
, 0, RXY_LLGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1829 case INDEX_op_ld16s_i32
:
1830 tcg_out_mem(s
, RX_LH
, RXY_LHY
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1833 case INDEX_op_ld_i32
:
1834 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1838 tcg_out_mem(s
, RX_STC
, RXY_STCY
, args
[0], args
[1],
1839 TCG_REG_NONE
, args
[2]);
1843 tcg_out_mem(s
, RX_STH
, RXY_STHY
, args
[0], args
[1],
1844 TCG_REG_NONE
, args
[2]);
1847 case INDEX_op_st_i32
:
1848 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1851 case INDEX_op_add_i32
:
1852 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1853 if (const_args
[2]) {
1856 if (a2
== (int16_t)a2
) {
1857 tcg_out_insn(s
, RI
, AHI
, a0
, a2
);
1860 if (s390_facilities
& FACILITY_EXT_IMM
) {
1861 tcg_out_insn(s
, RIL
, AFI
, a0
, a2
);
1865 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1866 } else if (a0
== a1
) {
1867 tcg_out_insn(s
, RR
, AR
, a0
, a2
);
1869 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1872 case INDEX_op_sub_i32
:
1873 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1874 if (const_args
[2]) {
1877 } else if (a0
== a1
) {
1878 tcg_out_insn(s
, RR
, SR
, a0
, a2
);
1880 tcg_out_insn(s
, RRF
, SRK
, a0
, a1
, a2
);
1884 case INDEX_op_and_i32
:
1885 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1886 if (const_args
[2]) {
1887 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1888 tgen_andi(s
, TCG_TYPE_I32
, a0
, a2
);
1889 } else if (a0
== a1
) {
1890 tcg_out_insn(s
, RR
, NR
, a0
, a2
);
1892 tcg_out_insn(s
, RRF
, NRK
, a0
, a1
, a2
);
1895 case INDEX_op_or_i32
:
1896 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1897 if (const_args
[2]) {
1898 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1899 tgen_ori(s
, TCG_TYPE_I32
, a0
, a2
);
1900 } else if (a0
== a1
) {
1901 tcg_out_insn(s
, RR
, OR
, a0
, a2
);
1903 tcg_out_insn(s
, RRF
, ORK
, a0
, a1
, a2
);
1906 case INDEX_op_xor_i32
:
1907 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1908 if (const_args
[2]) {
1909 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1910 tgen_xori(s
, TCG_TYPE_I32
, a0
, a2
);
1911 } else if (a0
== a1
) {
1912 tcg_out_insn(s
, RR
, XR
, args
[0], args
[2]);
1914 tcg_out_insn(s
, RRF
, XRK
, a0
, a1
, a2
);
1918 case INDEX_op_neg_i32
:
1919 tcg_out_insn(s
, RR
, LCR
, args
[0], args
[1]);
1922 case INDEX_op_mul_i32
:
1923 if (const_args
[2]) {
1924 if ((int32_t)args
[2] == (int16_t)args
[2]) {
1925 tcg_out_insn(s
, RI
, MHI
, args
[0], args
[2]);
1927 tcg_out_insn(s
, RIL
, MSFI
, args
[0], args
[2]);
1930 tcg_out_insn(s
, RRE
, MSR
, args
[0], args
[2]);
1934 case INDEX_op_div2_i32
:
1935 tcg_out_insn(s
, RR
, DR
, TCG_REG_R2
, args
[4]);
1937 case INDEX_op_divu2_i32
:
1938 tcg_out_insn(s
, RRE
, DLR
, TCG_REG_R2
, args
[4]);
1941 case INDEX_op_shl_i32
:
1945 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1947 if (const_args
[2]) {
1948 tcg_out_sh32(s
, op
, a0
, TCG_REG_NONE
, a2
);
1950 tcg_out_sh32(s
, op
, a0
, a2
, 0);
1953 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1954 if (const_args
[2]) {
1955 tcg_out_sh64(s
, op2
, a0
, a1
, TCG_REG_NONE
, a2
);
1957 tcg_out_sh64(s
, op2
, a0
, a1
, a2
, 0);
1961 case INDEX_op_shr_i32
:
1965 case INDEX_op_sar_i32
:
1970 case INDEX_op_rotl_i32
:
1971 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1972 if (const_args
[2]) {
1973 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1975 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], args
[2], 0);
1978 case INDEX_op_rotr_i32
:
1979 if (const_args
[2]) {
1980 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1],
1981 TCG_REG_NONE
, (32 - args
[2]) & 31);
1983 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
1984 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_TMP0
, 0);
1988 case INDEX_op_ext8s_i32
:
1989 tgen_ext8s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1991 case INDEX_op_ext16s_i32
:
1992 tgen_ext16s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1994 case INDEX_op_ext8u_i32
:
1995 tgen_ext8u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1997 case INDEX_op_ext16u_i32
:
1998 tgen_ext16u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
2002 /* The TCG bswap definition requires bits 0-47 already be zero.
2003 Thus we don't need the G-type insns to implement bswap16_i64. */
2004 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
2005 tcg_out_sh32(s
, RS_SRL
, args
[0], TCG_REG_NONE
, 16);
2008 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
2011 case INDEX_op_add2_i32
:
2012 if (const_args
[4]) {
2013 tcg_out_insn(s
, RIL
, ALFI
, args
[0], args
[4]);
2015 tcg_out_insn(s
, RR
, ALR
, args
[0], args
[4]);
2017 tcg_out_insn(s
, RRE
, ALCR
, args
[1], args
[5]);
2019 case INDEX_op_sub2_i32
:
2020 if (const_args
[4]) {
2021 tcg_out_insn(s
, RIL
, SLFI
, args
[0], args
[4]);
2023 tcg_out_insn(s
, RR
, SLR
, args
[0], args
[4]);
2025 tcg_out_insn(s
, RRE
, SLBR
, args
[1], args
[5]);
2029 tgen_branch(s
, S390_CC_ALWAYS
, arg_label(args
[0]));
2032 case INDEX_op_brcond_i32
:
2033 tgen_brcond(s
, TCG_TYPE_I32
, args
[2], args
[0],
2034 args
[1], const_args
[1], arg_label(args
[3]));
2036 case INDEX_op_setcond_i32
:
2037 tgen_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1],
2038 args
[2], const_args
[2]);
2040 case INDEX_op_movcond_i32
:
2041 tgen_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1],
2042 args
[2], const_args
[2], args
[3], const_args
[3]);
2045 case INDEX_op_qemu_ld_i32
:
2046 /* ??? Technically we can use a non-extending instruction. */
2047 case INDEX_op_qemu_ld_i64
:
2048 tcg_out_qemu_ld(s
, args
[0], args
[1], args
[2]);
2050 case INDEX_op_qemu_st_i32
:
2051 case INDEX_op_qemu_st_i64
:
2052 tcg_out_qemu_st(s
, args
[0], args
[1], args
[2]);
2055 case INDEX_op_ld16s_i64
:
2056 tcg_out_mem(s
, 0, RXY_LGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2058 case INDEX_op_ld32u_i64
:
2059 tcg_out_mem(s
, 0, RXY_LLGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2061 case INDEX_op_ld32s_i64
:
2062 tcg_out_mem(s
, 0, RXY_LGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2064 case INDEX_op_ld_i64
:
2065 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2068 case INDEX_op_st32_i64
:
2069 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
2071 case INDEX_op_st_i64
:
2072 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2075 case INDEX_op_add_i64
:
2076 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2077 if (const_args
[2]) {
2080 if (a2
== (int16_t)a2
) {
2081 tcg_out_insn(s
, RI
, AGHI
, a0
, a2
);
2084 if (s390_facilities
& FACILITY_EXT_IMM
) {
2085 if (a2
== (int32_t)a2
) {
2086 tcg_out_insn(s
, RIL
, AGFI
, a0
, a2
);
2088 } else if (a2
== (uint32_t)a2
) {
2089 tcg_out_insn(s
, RIL
, ALGFI
, a0
, a2
);
2091 } else if (-a2
== (uint32_t)-a2
) {
2092 tcg_out_insn(s
, RIL
, SLGFI
, a0
, -a2
);
2097 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
2098 } else if (a0
== a1
) {
2099 tcg_out_insn(s
, RRE
, AGR
, a0
, a2
);
2101 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
2104 case INDEX_op_sub_i64
:
2105 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2106 if (const_args
[2]) {
2109 } else if (a0
== a1
) {
2110 tcg_out_insn(s
, RRE
, SGR
, a0
, a2
);
2112 tcg_out_insn(s
, RRF
, SGRK
, a0
, a1
, a2
);
2116 case INDEX_op_and_i64
:
2117 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2118 if (const_args
[2]) {
2119 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2120 tgen_andi(s
, TCG_TYPE_I64
, args
[0], args
[2]);
2121 } else if (a0
== a1
) {
2122 tcg_out_insn(s
, RRE
, NGR
, args
[0], args
[2]);
2124 tcg_out_insn(s
, RRF
, NGRK
, a0
, a1
, a2
);
2127 case INDEX_op_or_i64
:
2128 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2129 if (const_args
[2]) {
2130 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2131 tgen_ori(s
, TCG_TYPE_I64
, a0
, a2
);
2132 } else if (a0
== a1
) {
2133 tcg_out_insn(s
, RRE
, OGR
, a0
, a2
);
2135 tcg_out_insn(s
, RRF
, OGRK
, a0
, a1
, a2
);
2138 case INDEX_op_xor_i64
:
2139 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2140 if (const_args
[2]) {
2141 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2142 tgen_xori(s
, TCG_TYPE_I64
, a0
, a2
);
2143 } else if (a0
== a1
) {
2144 tcg_out_insn(s
, RRE
, XGR
, a0
, a2
);
2146 tcg_out_insn(s
, RRF
, XGRK
, a0
, a1
, a2
);
2150 case INDEX_op_neg_i64
:
2151 tcg_out_insn(s
, RRE
, LCGR
, args
[0], args
[1]);
2153 case INDEX_op_bswap64_i64
:
2154 tcg_out_insn(s
, RRE
, LRVGR
, args
[0], args
[1]);
2157 case INDEX_op_mul_i64
:
2158 if (const_args
[2]) {
2159 if (args
[2] == (int16_t)args
[2]) {
2160 tcg_out_insn(s
, RI
, MGHI
, args
[0], args
[2]);
2162 tcg_out_insn(s
, RIL
, MSGFI
, args
[0], args
[2]);
2165 tcg_out_insn(s
, RRE
, MSGR
, args
[0], args
[2]);
2169 case INDEX_op_div2_i64
:
2170 /* ??? We get an unnecessary sign-extension of the dividend
2171 into R3 with this definition, but as we do in fact always
2172 produce both quotient and remainder using INDEX_op_div_i64
2173 instead requires jumping through even more hoops. */
2174 tcg_out_insn(s
, RRE
, DSGR
, TCG_REG_R2
, args
[4]);
2176 case INDEX_op_divu2_i64
:
2177 tcg_out_insn(s
, RRE
, DLGR
, TCG_REG_R2
, args
[4]);
2179 case INDEX_op_mulu2_i64
:
2180 tcg_out_insn(s
, RRE
, MLGR
, TCG_REG_R2
, args
[3]);
2183 case INDEX_op_shl_i64
:
2186 if (const_args
[2]) {
2187 tcg_out_sh64(s
, op
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2189 tcg_out_sh64(s
, op
, args
[0], args
[1], args
[2], 0);
2192 case INDEX_op_shr_i64
:
2195 case INDEX_op_sar_i64
:
2199 case INDEX_op_rotl_i64
:
2200 if (const_args
[2]) {
2201 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2202 TCG_REG_NONE
, args
[2]);
2204 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], args
[2], 0);
2207 case INDEX_op_rotr_i64
:
2208 if (const_args
[2]) {
2209 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2210 TCG_REG_NONE
, (64 - args
[2]) & 63);
2212 /* We can use the smaller 32-bit negate because only the
2213 low 6 bits are examined for the rotate. */
2214 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
2215 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], TCG_TMP0
, 0);
2219 case INDEX_op_ext8s_i64
:
2220 tgen_ext8s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2222 case INDEX_op_ext16s_i64
:
2223 tgen_ext16s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2225 case INDEX_op_ext_i32_i64
:
2226 case INDEX_op_ext32s_i64
:
2227 tgen_ext32s(s
, args
[0], args
[1]);
2229 case INDEX_op_ext8u_i64
:
2230 tgen_ext8u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2232 case INDEX_op_ext16u_i64
:
2233 tgen_ext16u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2235 case INDEX_op_extu_i32_i64
:
2236 case INDEX_op_ext32u_i64
:
2237 tgen_ext32u(s
, args
[0], args
[1]);
2240 case INDEX_op_add2_i64
:
2241 if (const_args
[4]) {
2242 if ((int64_t)args
[4] >= 0) {
2243 tcg_out_insn(s
, RIL
, ALGFI
, args
[0], args
[4]);
2245 tcg_out_insn(s
, RIL
, SLGFI
, args
[0], -args
[4]);
2248 tcg_out_insn(s
, RRE
, ALGR
, args
[0], args
[4]);
2250 tcg_out_insn(s
, RRE
, ALCGR
, args
[1], args
[5]);
2252 case INDEX_op_sub2_i64
:
2253 if (const_args
[4]) {
2254 if ((int64_t)args
[4] >= 0) {
2255 tcg_out_insn(s
, RIL
, SLGFI
, args
[0], args
[4]);
2257 tcg_out_insn(s
, RIL
, ALGFI
, args
[0], -args
[4]);
2260 tcg_out_insn(s
, RRE
, SLGR
, args
[0], args
[4]);
2262 tcg_out_insn(s
, RRE
, SLBGR
, args
[1], args
[5]);
2265 case INDEX_op_brcond_i64
:
2266 tgen_brcond(s
, TCG_TYPE_I64
, args
[2], args
[0],
2267 args
[1], const_args
[1], arg_label(args
[3]));
2269 case INDEX_op_setcond_i64
:
2270 tgen_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1],
2271 args
[2], const_args
[2]);
2273 case INDEX_op_movcond_i64
:
2274 tgen_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1],
2275 args
[2], const_args
[2], args
[3], const_args
[3]);
2279 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2280 if (const_args
[1]) {
2281 tgen_deposit(s
, a0
, a2
, args
[3], args
[4], 1);
2283 /* Since we can't support "0Z" as a constraint, we allow a1 in
2284 any register. Fix things up as if a matching constraint. */
2286 TCGType type
= (opc
== INDEX_op_deposit_i64
);
2288 tcg_out_mov(s
, type
, TCG_TMP0
, a2
);
2291 tcg_out_mov(s
, type
, a0
, a1
);
2293 tgen_deposit(s
, a0
, a2
, args
[3], args
[4], 0);
2298 tgen_extract(s
, args
[0], args
[1], args
[2], args
[3]);
2301 case INDEX_op_clz_i64
:
2302 tgen_clz(s
, args
[0], args
[1], args
[2], const_args
[2]);
2306 /* The host memory model is quite strong, we simply need to
2307 serialize the instruction stream. */
2308 if (args
[0] & TCG_MO_ST_LD
) {
2309 tcg_out_insn(s
, RR
, BCR
,
2310 s390_facilities
& FACILITY_FAST_BCR_SER
? 14 : 15, 0);
2314 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
2315 case INDEX_op_mov_i64
:
2316 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
2317 case INDEX_op_movi_i64
:
2318 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
2324 static const TCGTargetOpDef
*tcg_target_op_def(TCGOpcode op
)
2326 static const TCGTargetOpDef r
= { .args_ct_str
= { "r" } };
2327 static const TCGTargetOpDef r_r
= { .args_ct_str
= { "r", "r" } };
2328 static const TCGTargetOpDef r_L
= { .args_ct_str
= { "r", "L" } };
2329 static const TCGTargetOpDef L_L
= { .args_ct_str
= { "L", "L" } };
2330 static const TCGTargetOpDef r_ri
= { .args_ct_str
= { "r", "ri" } };
2331 static const TCGTargetOpDef r_r_ri
= { .args_ct_str
= { "r", "r", "ri" } };
2332 static const TCGTargetOpDef r_0_ri
= { .args_ct_str
= { "r", "0", "ri" } };
2333 static const TCGTargetOpDef r_0_rI
= { .args_ct_str
= { "r", "0", "rI" } };
2334 static const TCGTargetOpDef r_0_rJ
= { .args_ct_str
= { "r", "0", "rJ" } };
2335 static const TCGTargetOpDef a2_r
2336 = { .args_ct_str
= { "r", "r", "0", "1", "r", "r" } };
2337 static const TCGTargetOpDef a2_ri
2338 = { .args_ct_str
= { "r", "r", "0", "1", "ri", "r" } };
2339 static const TCGTargetOpDef a2_rA
2340 = { .args_ct_str
= { "r", "r", "0", "1", "rA", "r" } };
2343 case INDEX_op_goto_ptr
:
2346 case INDEX_op_ld8u_i32
:
2347 case INDEX_op_ld8u_i64
:
2348 case INDEX_op_ld8s_i32
:
2349 case INDEX_op_ld8s_i64
:
2350 case INDEX_op_ld16u_i32
:
2351 case INDEX_op_ld16u_i64
:
2352 case INDEX_op_ld16s_i32
:
2353 case INDEX_op_ld16s_i64
:
2354 case INDEX_op_ld_i32
:
2355 case INDEX_op_ld32u_i64
:
2356 case INDEX_op_ld32s_i64
:
2357 case INDEX_op_ld_i64
:
2358 case INDEX_op_st8_i32
:
2359 case INDEX_op_st8_i64
:
2360 case INDEX_op_st16_i32
:
2361 case INDEX_op_st16_i64
:
2362 case INDEX_op_st_i32
:
2363 case INDEX_op_st32_i64
:
2364 case INDEX_op_st_i64
:
2367 case INDEX_op_add_i32
:
2368 case INDEX_op_add_i64
:
2370 case INDEX_op_sub_i32
:
2371 case INDEX_op_sub_i64
:
2372 case INDEX_op_and_i32
:
2373 case INDEX_op_and_i64
:
2374 case INDEX_op_or_i32
:
2375 case INDEX_op_or_i64
:
2376 case INDEX_op_xor_i32
:
2377 case INDEX_op_xor_i64
:
2378 return (s390_facilities
& FACILITY_DISTINCT_OPS
? &r_r_ri
: &r_0_ri
);
2380 case INDEX_op_mul_i32
:
2381 /* If we have the general-instruction-extensions, then we have
2382 MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
2383 have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
2384 return (s390_facilities
& FACILITY_GEN_INST_EXT
? &r_0_ri
: &r_0_rI
);
2385 case INDEX_op_mul_i64
:
2386 return (s390_facilities
& FACILITY_GEN_INST_EXT
? &r_0_rJ
: &r_0_rI
);
2388 case INDEX_op_shl_i32
:
2389 case INDEX_op_shr_i32
:
2390 case INDEX_op_sar_i32
:
2391 return (s390_facilities
& FACILITY_DISTINCT_OPS
? &r_r_ri
: &r_0_ri
);
2393 case INDEX_op_shl_i64
:
2394 case INDEX_op_shr_i64
:
2395 case INDEX_op_sar_i64
:
2398 case INDEX_op_rotl_i32
:
2399 case INDEX_op_rotl_i64
:
2400 case INDEX_op_rotr_i32
:
2401 case INDEX_op_rotr_i64
:
2404 case INDEX_op_brcond_i32
:
2405 case INDEX_op_brcond_i64
:
2408 case INDEX_op_bswap16_i32
:
2409 case INDEX_op_bswap16_i64
:
2410 case INDEX_op_bswap32_i32
:
2411 case INDEX_op_bswap32_i64
:
2412 case INDEX_op_bswap64_i64
:
2413 case INDEX_op_neg_i32
:
2414 case INDEX_op_neg_i64
:
2415 case INDEX_op_ext8s_i32
:
2416 case INDEX_op_ext8s_i64
:
2417 case INDEX_op_ext8u_i32
:
2418 case INDEX_op_ext8u_i64
:
2419 case INDEX_op_ext16s_i32
:
2420 case INDEX_op_ext16s_i64
:
2421 case INDEX_op_ext16u_i32
:
2422 case INDEX_op_ext16u_i64
:
2423 case INDEX_op_ext32s_i64
:
2424 case INDEX_op_ext32u_i64
:
2425 case INDEX_op_ext_i32_i64
:
2426 case INDEX_op_extu_i32_i64
:
2427 case INDEX_op_extract_i32
:
2428 case INDEX_op_extract_i64
:
2431 case INDEX_op_clz_i64
:
2432 case INDEX_op_setcond_i32
:
2433 case INDEX_op_setcond_i64
:
2436 case INDEX_op_qemu_ld_i32
:
2437 case INDEX_op_qemu_ld_i64
:
2439 case INDEX_op_qemu_st_i64
:
2440 case INDEX_op_qemu_st_i32
:
2443 case INDEX_op_deposit_i32
:
2444 case INDEX_op_deposit_i64
:
2446 static const TCGTargetOpDef dep
2447 = { .args_ct_str
= { "r", "rZ", "r" } };
2450 case INDEX_op_movcond_i32
:
2451 case INDEX_op_movcond_i64
:
2453 static const TCGTargetOpDef movc
2454 = { .args_ct_str
= { "r", "r", "ri", "r", "0" } };
2455 static const TCGTargetOpDef movc_l
2456 = { .args_ct_str
= { "r", "r", "ri", "rI", "0" } };
2457 return (s390_facilities
& FACILITY_LOAD_ON_COND2
? &movc_l
: &movc
);
2459 case INDEX_op_div2_i32
:
2460 case INDEX_op_div2_i64
:
2461 case INDEX_op_divu2_i32
:
2462 case INDEX_op_divu2_i64
:
2464 static const TCGTargetOpDef div2
2465 = { .args_ct_str
= { "b", "a", "0", "1", "r" } };
2468 case INDEX_op_mulu2_i64
:
2470 static const TCGTargetOpDef mul2
2471 = { .args_ct_str
= { "b", "a", "0", "r" } };
2475 case INDEX_op_add2_i32
:
2476 case INDEX_op_sub2_i32
:
2477 return (s390_facilities
& FACILITY_EXT_IMM
? &a2_ri
: &a2_r
);
2478 case INDEX_op_add2_i64
:
2479 case INDEX_op_sub2_i64
:
2480 return (s390_facilities
& FACILITY_EXT_IMM
? &a2_rA
: &a2_r
);
2488 static void query_s390_facilities(void)
2490 unsigned long hwcap
= qemu_getauxval(AT_HWCAP
);
2492 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2493 is present on all 64-bit systems, but let's check for it anyway. */
2494 if (hwcap
& HWCAP_S390_STFLE
) {
2495 register int r0
__asm__("0");
2496 register void *r1
__asm__("1");
2499 r1
= &s390_facilities
;
2500 asm volatile(".word 0xb2b0,0x1000"
2501 : "=r"(r0
) : "0"(0), "r"(r1
) : "memory", "cc");
2505 static void tcg_target_init(TCGContext
*s
)
2507 query_s390_facilities();
2509 tcg_target_available_regs
[TCG_TYPE_I32
] = 0xffff;
2510 tcg_target_available_regs
[TCG_TYPE_I64
] = 0xffff;
2512 tcg_target_call_clobber_regs
= 0;
2513 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2514 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2515 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2516 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2517 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R4
);
2518 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R5
);
2519 /* The r6 register is technically call-saved, but it's also a parameter
2520 register, so it can get killed by setup for the qemu_st helper. */
2521 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R6
);
2522 /* The return register can be considered call-clobbered. */
2523 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2525 s
->reserved_regs
= 0;
2526 tcg_regset_set_reg(s
->reserved_regs
, TCG_TMP0
);
2527 /* XXX many insns can't be used with R0, so we better avoid it for now */
2528 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_R0
);
2529 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2531 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_TB
);
2535 #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2536 + TCG_STATIC_CALL_ARGS_SIZE \
2537 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2539 static void tcg_target_qemu_prologue(TCGContext
*s
)
2541 /* stmg %r6,%r15,48(%r15) (save registers) */
2542 tcg_out_insn(s
, RXY
, STMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 48);
2544 /* aghi %r15,-frame_size */
2545 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_R15
, -FRAME_SIZE
);
2547 tcg_set_frame(s
, TCG_REG_CALL_STACK
,
2548 TCG_STATIC_CALL_ARGS_SIZE
+ TCG_TARGET_CALL_STACK_OFFSET
,
2549 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2551 #ifndef CONFIG_SOFTMMU
2552 if (guest_base
>= 0x80000) {
2553 tcg_out_movi_int(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, guest_base
, true);
2554 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
2558 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2560 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_TB
,
2561 tcg_target_call_iarg_regs
[1]);
2564 /* br %r3 (go to TB) */
2565 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, tcg_target_call_iarg_regs
[1]);
2568 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2569 * and fall through to the rest of the epilogue.
2571 s
->code_gen_epilogue
= s
->code_ptr
;
2572 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, 0);
2575 tb_ret_addr
= s
->code_ptr
;
2577 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2578 tcg_out_insn(s
, RXY
, LMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
,
2581 /* br %r14 (return) */
2582 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_R14
);
2585 static void tcg_out_nop_fill(tcg_insn_unit
*p
, int count
)
2587 memset(p
, 0x07, count
* sizeof(tcg_insn_unit
));
2592 uint8_t fde_def_cfa
[4];
2593 uint8_t fde_reg_ofs
[18];
2596 /* We're expecting a 2 byte uleb128 encoded value. */
2597 QEMU_BUILD_BUG_ON(FRAME_SIZE
>= (1 << 14));
2599 #define ELF_HOST_MACHINE EM_S390
2601 static const DebugFrame debug_frame
= {
2602 .h
.cie
.len
= sizeof(DebugFrameCIE
)-4, /* length after .len member */
2605 .h
.cie
.code_align
= 1,
2606 .h
.cie
.data_align
= 8, /* sleb128 8 */
2607 .h
.cie
.return_column
= TCG_REG_R14
,
2609 /* Total FDE size does not include the "len" member. */
2610 .h
.fde
.len
= sizeof(DebugFrame
) - offsetof(DebugFrame
, h
.fde
.cie_offset
),
2613 12, TCG_REG_CALL_STACK
, /* DW_CFA_def_cfa %r15, ... */
2614 (FRAME_SIZE
& 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2618 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2619 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2620 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2621 0x89, 9, /* DW_CFA_offset, %r9, 72 */
2622 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2623 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2624 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2625 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2626 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2630 void tcg_register_jit(void *buf
, size_t buf_size
)
2632 tcg_register_jit_int(buf
, buf_size
, &debug_frame
, sizeof(debug_frame
));