2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "tcg-be-null.h"
29 /* We only support generating code for 64-bit mode. */
30 #if TCG_TARGET_REG_BITS != 64
31 #error "unsupported code generation mode"
36 /* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39 #define USE_LONG_BRANCHES 0
41 #define TCG_CT_CONST_MULI 0x100
42 #define TCG_CT_CONST_ORI 0x200
43 #define TCG_CT_CONST_XORI 0x400
44 #define TCG_CT_CONST_CMPI 0x800
46 /* Several places within the instruction set 0 means "no register"
47 rather than TCG_REG_R0. */
48 #define TCG_REG_NONE 0
50 /* A scratch register that may be be used throughout the backend. */
51 #define TCG_TMP0 TCG_REG_R14
53 #ifdef CONFIG_USE_GUEST_BASE
54 #define TCG_GUEST_BASE_REG TCG_REG_R13
56 #define TCG_GUEST_BASE_REG TCG_REG_R0
64 /* All of the following instructions are prefixed with their instruction
65 format, and are defined as 8- or 16-bit quantities, even when the two
66 halves of the 16-bit quantity may appear 32 bits apart in the insn.
67 This makes it easy to copy the values from the tables in Appendix B. */
68 typedef enum S390Opcode
{
230 #define LD_SIGNED 0x04
231 #define LD_UINT8 0x00
232 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
233 #define LD_UINT16 0x01
234 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
235 #define LD_UINT32 0x02
236 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
237 #define LD_UINT64 0x03
238 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
241 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
242 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
243 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
247 /* Since R6 is a potential argument register, choose it last of the
248 call-saved registers. Likewise prefer the call-clobbered registers
249 in reverse order to maximize the chance of avoiding the arguments. */
250 static const int tcg_target_reg_alloc_order
[] = {
268 static const int tcg_target_call_iarg_regs
[] = {
276 static const int tcg_target_call_oarg_regs
[] = {
284 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
285 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
286 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
287 #define S390_CC_NEVER 0
288 #define S390_CC_ALWAYS 15
290 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
291 static const uint8_t tcg_cond_to_s390_cond
[] = {
292 [TCG_COND_EQ
] = S390_CC_EQ
,
293 [TCG_COND_NE
] = S390_CC_NE
,
294 [TCG_COND_LT
] = S390_CC_LT
,
295 [TCG_COND_LE
] = S390_CC_LE
,
296 [TCG_COND_GT
] = S390_CC_GT
,
297 [TCG_COND_GE
] = S390_CC_GE
,
298 [TCG_COND_LTU
] = S390_CC_LT
,
299 [TCG_COND_LEU
] = S390_CC_LE
,
300 [TCG_COND_GTU
] = S390_CC_GT
,
301 [TCG_COND_GEU
] = S390_CC_GE
,
304 /* Condition codes that result from a LOAD AND TEST. Here, we have no
305 unsigned instruction variation, however since the test is vs zero we
306 can re-map the outcomes appropriately. */
307 static const uint8_t tcg_cond_to_ltr_cond
[] = {
308 [TCG_COND_EQ
] = S390_CC_EQ
,
309 [TCG_COND_NE
] = S390_CC_NE
,
310 [TCG_COND_LT
] = S390_CC_LT
,
311 [TCG_COND_LE
] = S390_CC_LE
,
312 [TCG_COND_GT
] = S390_CC_GT
,
313 [TCG_COND_GE
] = S390_CC_GE
,
314 [TCG_COND_LTU
] = S390_CC_NEVER
,
315 [TCG_COND_LEU
] = S390_CC_EQ
,
316 [TCG_COND_GTU
] = S390_CC_NE
,
317 [TCG_COND_GEU
] = S390_CC_ALWAYS
,
320 #ifdef CONFIG_SOFTMMU
321 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
323 static const void * const qemu_ld_helpers
[4] = {
330 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
331 uintxx_t val, int mmu_idx) */
332 static const void * const qemu_st_helpers
[4] = {
340 static uint8_t *tb_ret_addr
;
342 /* A list of relevant facilities used by this translator. Some of these
343 are required for proper operation, and these are checked at startup. */
345 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
346 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
347 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
348 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
349 #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
351 static uint64_t facilities
;
353 static void patch_reloc(uint8_t *code_ptr
, int type
,
354 intptr_t value
, intptr_t addend
)
356 intptr_t code_ptr_tl
= (intptr_t)code_ptr
;
359 /* ??? Not the usual definition of "addend". */
360 pcrel2
= (value
- (code_ptr_tl
+ addend
)) >> 1;
364 assert(pcrel2
== (int16_t)pcrel2
);
365 *(int16_t *)code_ptr
= pcrel2
;
368 assert(pcrel2
== (int32_t)pcrel2
);
369 *(int32_t *)code_ptr
= pcrel2
;
377 /* parse target specific constraints */
378 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
380 const char *ct_str
= *pct_str
;
383 case 'r': /* all registers */
384 ct
->ct
|= TCG_CT_REG
;
385 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
387 case 'R': /* not R0 */
388 ct
->ct
|= TCG_CT_REG
;
389 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
390 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R0
);
392 case 'L': /* qemu_ld/st constraint */
393 ct
->ct
|= TCG_CT_REG
;
394 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
395 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R2
);
396 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R3
);
397 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R4
);
399 case 'a': /* force R2 for division */
400 ct
->ct
|= TCG_CT_REG
;
401 tcg_regset_clear(ct
->u
.regs
);
402 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R2
);
404 case 'b': /* force R3 for division */
405 ct
->ct
|= TCG_CT_REG
;
406 tcg_regset_clear(ct
->u
.regs
);
407 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R3
);
410 ct
->ct
|= TCG_CT_CONST_MULI
;
413 ct
->ct
|= TCG_CT_CONST_ORI
;
416 ct
->ct
|= TCG_CT_CONST_XORI
;
419 ct
->ct
|= TCG_CT_CONST_CMPI
;
430 /* Immediates to be used with logical OR. This is an optimization only,
431 since a full 64-bit immediate OR can always be performed with 4 sequential
432 OI[LH][LH] instructions. What we're looking for is immediates that we
433 can load efficiently, and the immediate load plus the reg-reg OR is
434 smaller than the sequential OI's. */
436 static int tcg_match_ori(TCGType type
, tcg_target_long val
)
438 if (facilities
& FACILITY_EXT_IMM
) {
439 if (type
== TCG_TYPE_I32
) {
440 /* All 32-bit ORs can be performed with 1 48-bit insn. */
445 /* Look for negative values. These are best to load with LGHI. */
447 if (val
== (int16_t)val
) {
450 if (facilities
& FACILITY_EXT_IMM
) {
451 if (val
== (int32_t)val
) {
460 /* Immediates to be used with logical XOR. This is almost, but not quite,
461 only an optimization. XOR with immediate is only supported with the
462 extended-immediate facility. That said, there are a few patterns for
463 which it is better to load the value into a register first. */
465 static int tcg_match_xori(TCGType type
, tcg_target_long val
)
467 if ((facilities
& FACILITY_EXT_IMM
) == 0) {
471 if (type
== TCG_TYPE_I32
) {
472 /* All 32-bit XORs can be performed with 1 48-bit insn. */
476 /* Look for negative values. These are best to load with LGHI. */
477 if (val
< 0 && val
== (int32_t)val
) {
484 /* Imediates to be used with comparisons. */
486 static int tcg_match_cmpi(TCGType type
, tcg_target_long val
)
488 if (facilities
& FACILITY_EXT_IMM
) {
489 /* The COMPARE IMMEDIATE instruction is available. */
490 if (type
== TCG_TYPE_I32
) {
491 /* We have a 32-bit immediate and can compare against anything. */
494 /* ??? We have no insight here into whether the comparison is
495 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
496 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
497 a 32-bit unsigned immediate. If we were to use the (semi)
498 obvious "val == (int32_t)val" we would be enabling unsigned
499 comparisons vs very large numbers. The only solution is to
500 take the intersection of the ranges. */
501 /* ??? Another possible solution is to simply lie and allow all
502 constants here and force the out-of-range values into a temp
503 register in tgen_cmp when we have knowledge of the actual
504 comparison code in use. */
505 return val
>= 0 && val
<= 0x7fffffff;
508 /* Only the LOAD AND TEST instruction is available. */
513 /* Test if a constant matches the constraint. */
514 static int tcg_target_const_match(tcg_target_long val
, TCGType type
,
515 const TCGArgConstraint
*arg_ct
)
519 if (ct
& TCG_CT_CONST
) {
523 if (type
== TCG_TYPE_I32
) {
527 /* The following are mutually exclusive. */
528 if (ct
& TCG_CT_CONST_MULI
) {
529 /* Immediates that may be used with multiply. If we have the
530 general-instruction-extensions, then we have MULTIPLY SINGLE
531 IMMEDIATE with a signed 32-bit, otherwise we have only
532 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
533 if (facilities
& FACILITY_GEN_INST_EXT
) {
534 return val
== (int32_t)val
;
536 return val
== (int16_t)val
;
538 } else if (ct
& TCG_CT_CONST_ORI
) {
539 return tcg_match_ori(type
, val
);
540 } else if (ct
& TCG_CT_CONST_XORI
) {
541 return tcg_match_xori(type
, val
);
542 } else if (ct
& TCG_CT_CONST_CMPI
) {
543 return tcg_match_cmpi(type
, val
);
549 /* Emit instructions according to the given instruction format. */
551 static void tcg_out_insn_RR(TCGContext
*s
, S390Opcode op
, TCGReg r1
, TCGReg r2
)
553 tcg_out16(s
, (op
<< 8) | (r1
<< 4) | r2
);
556 static void tcg_out_insn_RRE(TCGContext
*s
, S390Opcode op
,
557 TCGReg r1
, TCGReg r2
)
559 tcg_out32(s
, (op
<< 16) | (r1
<< 4) | r2
);
562 static void tcg_out_insn_RRF(TCGContext
*s
, S390Opcode op
,
563 TCGReg r1
, TCGReg r2
, int m3
)
565 tcg_out32(s
, (op
<< 16) | (m3
<< 12) | (r1
<< 4) | r2
);
568 static void tcg_out_insn_RI(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
570 tcg_out32(s
, (op
<< 16) | (r1
<< 20) | (i2
& 0xffff));
573 static void tcg_out_insn_RIL(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
575 tcg_out16(s
, op
| (r1
<< 4));
579 static void tcg_out_insn_RS(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
580 TCGReg b2
, TCGReg r3
, int disp
)
582 tcg_out32(s
, (op
<< 24) | (r1
<< 20) | (r3
<< 16) | (b2
<< 12)
586 static void tcg_out_insn_RSY(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
587 TCGReg b2
, TCGReg r3
, int disp
)
589 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | r3
);
590 tcg_out32(s
, (op
& 0xff) | (b2
<< 28)
591 | ((disp
& 0xfff) << 16) | ((disp
& 0xff000) >> 4));
594 #define tcg_out_insn_RX tcg_out_insn_RS
595 #define tcg_out_insn_RXY tcg_out_insn_RSY
597 /* Emit an opcode with "type-checking" of the format. */
598 #define tcg_out_insn(S, FMT, OP, ...) \
599 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
602 /* emit 64-bit shifts */
603 static void tcg_out_sh64(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
604 TCGReg src
, TCGReg sh_reg
, int sh_imm
)
606 tcg_out_insn_RSY(s
, op
, dest
, sh_reg
, src
, sh_imm
);
609 /* emit 32-bit shifts */
610 static void tcg_out_sh32(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
611 TCGReg sh_reg
, int sh_imm
)
613 tcg_out_insn_RS(s
, op
, dest
, sh_reg
, 0, sh_imm
);
616 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg dst
, TCGReg src
)
619 if (type
== TCG_TYPE_I32
) {
620 tcg_out_insn(s
, RR
, LR
, dst
, src
);
622 tcg_out_insn(s
, RRE
, LGR
, dst
, src
);
627 /* load a register with an immediate value */
628 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
629 TCGReg ret
, tcg_target_long sval
)
631 static const S390Opcode lli_insns
[4] = {
632 RI_LLILL
, RI_LLILH
, RI_LLIHL
, RI_LLIHH
635 tcg_target_ulong uval
= sval
;
638 if (type
== TCG_TYPE_I32
) {
639 uval
= (uint32_t)sval
;
640 sval
= (int32_t)sval
;
643 /* Try all 32-bit insns that can load it in one go. */
644 if (sval
>= -0x8000 && sval
< 0x8000) {
645 tcg_out_insn(s
, RI
, LGHI
, ret
, sval
);
649 for (i
= 0; i
< 4; i
++) {
650 tcg_target_long mask
= 0xffffull
<< i
*16;
651 if ((uval
& mask
) == uval
) {
652 tcg_out_insn_RI(s
, lli_insns
[i
], ret
, uval
>> i
*16);
657 /* Try all 48-bit insns that can load it in one go. */
658 if (facilities
& FACILITY_EXT_IMM
) {
659 if (sval
== (int32_t)sval
) {
660 tcg_out_insn(s
, RIL
, LGFI
, ret
, sval
);
663 if (uval
<= 0xffffffff) {
664 tcg_out_insn(s
, RIL
, LLILF
, ret
, uval
);
667 if ((uval
& 0xffffffff) == 0) {
668 tcg_out_insn(s
, RIL
, LLIHF
, ret
, uval
>> 31 >> 1);
673 /* Try for PC-relative address load. */
674 if ((sval
& 1) == 0) {
675 intptr_t off
= (sval
- (intptr_t)s
->code_ptr
) >> 1;
676 if (off
== (int32_t)off
) {
677 tcg_out_insn(s
, RIL
, LARL
, ret
, off
);
682 /* If extended immediates are not present, then we may have to issue
683 several instructions to load the low 32 bits. */
684 if (!(facilities
& FACILITY_EXT_IMM
)) {
685 /* A 32-bit unsigned value can be loaded in 2 insns. And given
686 that the lli_insns loop above did not succeed, we know that
687 both insns are required. */
688 if (uval
<= 0xffffffff) {
689 tcg_out_insn(s
, RI
, LLILL
, ret
, uval
);
690 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
694 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
695 We first want to make sure that all the high bits get set. With
696 luck the low 16-bits can be considered negative to perform that for
697 free, otherwise we load an explicit -1. */
698 if (sval
>> 31 >> 1 == -1) {
700 tcg_out_insn(s
, RI
, LGHI
, ret
, uval
);
702 tcg_out_insn(s
, RI
, LGHI
, ret
, -1);
703 tcg_out_insn(s
, RI
, IILL
, ret
, uval
);
705 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
710 /* If we get here, both the high and low parts have non-zero bits. */
712 /* Recurse to load the lower 32-bits. */
713 tcg_out_movi(s
, TCG_TYPE_I64
, ret
, uval
& 0xffffffff);
715 /* Insert data into the high 32-bits. */
716 uval
= uval
>> 31 >> 1;
717 if (facilities
& FACILITY_EXT_IMM
) {
718 if (uval
< 0x10000) {
719 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
720 } else if ((uval
& 0xffff) == 0) {
721 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
723 tcg_out_insn(s
, RIL
, IIHF
, ret
, uval
);
727 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
729 if (uval
& 0xffff0000) {
730 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
736 /* Emit a load/store type instruction. Inputs are:
737 DATA: The register to be loaded or stored.
738 BASE+OFS: The effective address.
739 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
740 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
742 static void tcg_out_mem(TCGContext
*s
, S390Opcode opc_rx
, S390Opcode opc_rxy
,
743 TCGReg data
, TCGReg base
, TCGReg index
,
746 if (ofs
< -0x80000 || ofs
>= 0x80000) {
747 /* Combine the low 20 bits of the offset with the actual load insn;
748 the high 44 bits must come from an immediate load. */
749 tcg_target_long low
= ((ofs
& 0xfffff) ^ 0x80000) - 0x80000;
750 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, ofs
- low
);
753 /* If we were already given an index register, add it in. */
754 if (index
!= TCG_REG_NONE
) {
755 tcg_out_insn(s
, RRE
, AGR
, TCG_TMP0
, index
);
760 if (opc_rx
&& ofs
>= 0 && ofs
< 0x1000) {
761 tcg_out_insn_RX(s
, opc_rx
, data
, base
, index
, ofs
);
763 tcg_out_insn_RXY(s
, opc_rxy
, data
, base
, index
, ofs
);
768 /* load data without address translation or endianness conversion */
769 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg data
,
770 TCGReg base
, intptr_t ofs
)
772 if (type
== TCG_TYPE_I32
) {
773 tcg_out_mem(s
, RX_L
, RXY_LY
, data
, base
, TCG_REG_NONE
, ofs
);
775 tcg_out_mem(s
, 0, RXY_LG
, data
, base
, TCG_REG_NONE
, ofs
);
779 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg data
,
780 TCGReg base
, intptr_t ofs
)
782 if (type
== TCG_TYPE_I32
) {
783 tcg_out_mem(s
, RX_ST
, RXY_STY
, data
, base
, TCG_REG_NONE
, ofs
);
785 tcg_out_mem(s
, 0, RXY_STG
, data
, base
, TCG_REG_NONE
, ofs
);
789 /* load data from an absolute host address */
790 static void tcg_out_ld_abs(TCGContext
*s
, TCGType type
, TCGReg dest
, void *abs
)
792 tcg_target_long addr
= (tcg_target_long
)abs
;
794 if (facilities
& FACILITY_GEN_INST_EXT
) {
795 tcg_target_long disp
= (addr
- (tcg_target_long
)s
->code_ptr
) >> 1;
796 if (disp
== (int32_t)disp
) {
797 if (type
== TCG_TYPE_I32
) {
798 tcg_out_insn(s
, RIL
, LRL
, dest
, disp
);
800 tcg_out_insn(s
, RIL
, LGRL
, dest
, disp
);
806 tcg_out_movi(s
, TCG_TYPE_PTR
, dest
, addr
& ~0xffff);
807 tcg_out_ld(s
, type
, dest
, dest
, addr
& 0xffff);
810 static inline void tcg_out_risbg(TCGContext
*s
, TCGReg dest
, TCGReg src
,
811 int msb
, int lsb
, int ofs
, int z
)
814 tcg_out16(s
, (RIE_RISBG
& 0xff00) | (dest
<< 4) | src
);
815 tcg_out16(s
, (msb
<< 8) | (z
<< 7) | lsb
);
816 tcg_out16(s
, (ofs
<< 8) | (RIE_RISBG
& 0xff));
819 static void tgen_ext8s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
821 if (facilities
& FACILITY_EXT_IMM
) {
822 tcg_out_insn(s
, RRE
, LGBR
, dest
, src
);
826 if (type
== TCG_TYPE_I32
) {
828 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 24);
830 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 24);
832 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 24);
834 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 56);
835 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 56);
839 static void tgen_ext8u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
841 if (facilities
& FACILITY_EXT_IMM
) {
842 tcg_out_insn(s
, RRE
, LLGCR
, dest
, src
);
847 tcg_out_movi(s
, type
, TCG_TMP0
, 0xff);
850 tcg_out_movi(s
, type
, dest
, 0xff);
852 if (type
== TCG_TYPE_I32
) {
853 tcg_out_insn(s
, RR
, NR
, dest
, src
);
855 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
859 static void tgen_ext16s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
861 if (facilities
& FACILITY_EXT_IMM
) {
862 tcg_out_insn(s
, RRE
, LGHR
, dest
, src
);
866 if (type
== TCG_TYPE_I32
) {
868 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 16);
870 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 16);
872 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 16);
874 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 48);
875 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 48);
879 static void tgen_ext16u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
881 if (facilities
& FACILITY_EXT_IMM
) {
882 tcg_out_insn(s
, RRE
, LLGHR
, dest
, src
);
887 tcg_out_movi(s
, type
, TCG_TMP0
, 0xffff);
890 tcg_out_movi(s
, type
, dest
, 0xffff);
892 if (type
== TCG_TYPE_I32
) {
893 tcg_out_insn(s
, RR
, NR
, dest
, src
);
895 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
899 static inline void tgen_ext32s(TCGContext
*s
, TCGReg dest
, TCGReg src
)
901 tcg_out_insn(s
, RRE
, LGFR
, dest
, src
);
904 static inline void tgen_ext32u(TCGContext
*s
, TCGReg dest
, TCGReg src
)
906 tcg_out_insn(s
, RRE
, LLGFR
, dest
, src
);
909 /* Accept bit patterns like these:
914 Copied from gcc sources. */
915 static inline bool risbg_mask(uint64_t c
)
918 /* We don't change the number of transitions by inverting,
919 so make sure we start with the LSB zero. */
923 /* Reject all zeros or all ones. */
927 /* Find the first transition. */
929 /* Invert to look for a second transition. */
931 /* Erase the first transition. */
933 /* Find the second transition, if any. */
935 /* Match if all the bits are 1's, or if c is zero. */
939 static void tgen_andi(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
941 static const S390Opcode ni_insns
[4] = {
942 RI_NILL
, RI_NILH
, RI_NIHL
, RI_NIHH
944 static const S390Opcode nif_insns
[2] = {
947 uint64_t valid
= (type
== TCG_TYPE_I32
? 0xffffffffull
: -1ull);
950 /* Look for the zero-extensions. */
951 if ((val
& valid
) == 0xffffffff) {
952 tgen_ext32u(s
, dest
, dest
);
955 if (facilities
& FACILITY_EXT_IMM
) {
956 if ((val
& valid
) == 0xff) {
957 tgen_ext8u(s
, TCG_TYPE_I64
, dest
, dest
);
960 if ((val
& valid
) == 0xffff) {
961 tgen_ext16u(s
, TCG_TYPE_I64
, dest
, dest
);
966 /* Try all 32-bit insns that can perform it in one go. */
967 for (i
= 0; i
< 4; i
++) {
968 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
969 if (((val
| ~valid
) & mask
) == mask
) {
970 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
975 /* Try all 48-bit insns that can perform it in one go. */
976 if (facilities
& FACILITY_EXT_IMM
) {
977 for (i
= 0; i
< 2; i
++) {
978 tcg_target_ulong mask
= ~(0xffffffffull
<< i
*32);
979 if (((val
| ~valid
) & mask
) == mask
) {
980 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
985 if ((facilities
& FACILITY_GEN_INST_EXT
) && risbg_mask(val
)) {
987 if ((val
& 0x8000000000000001ull
) == 0x8000000000000001ull
) {
988 /* Achieve wraparound by swapping msb and lsb. */
989 msb
= 63 - ctz64(~val
);
990 lsb
= clz64(~val
) + 1;
993 lsb
= 63 - ctz64(val
);
995 tcg_out_risbg(s
, dest
, dest
, msb
, lsb
, 0, 1);
999 /* Fall back to loading the constant. */
1000 tcg_out_movi(s
, type
, TCG_TMP0
, val
);
1001 if (type
== TCG_TYPE_I32
) {
1002 tcg_out_insn(s
, RR
, NR
, dest
, TCG_TMP0
);
1004 tcg_out_insn(s
, RRE
, NGR
, dest
, TCG_TMP0
);
1008 static void tgen64_ori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1010 static const S390Opcode oi_insns
[4] = {
1011 RI_OILL
, RI_OILH
, RI_OIHL
, RI_OIHH
1013 static const S390Opcode nif_insns
[2] = {
1019 /* Look for no-op. */
1024 if (facilities
& FACILITY_EXT_IMM
) {
1025 /* Try all 32-bit insns that can perform it in one go. */
1026 for (i
= 0; i
< 4; i
++) {
1027 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1028 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1029 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1034 /* Try all 48-bit insns that can perform it in one go. */
1035 for (i
= 0; i
< 2; i
++) {
1036 tcg_target_ulong mask
= (0xffffffffull
<< i
*32);
1037 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1038 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
1043 /* Perform the OR via sequential modifications to the high and
1044 low parts. Do this via recursion to handle 16-bit vs 32-bit
1045 masks in each half. */
1046 tgen64_ori(s
, dest
, val
& 0x00000000ffffffffull
);
1047 tgen64_ori(s
, dest
, val
& 0xffffffff00000000ull
);
1049 /* With no extended-immediate facility, we don't need to be so
1050 clever. Just iterate over the insns and mask in the constant. */
1051 for (i
= 0; i
< 4; i
++) {
1052 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1053 if ((val
& mask
) != 0) {
1054 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1060 static void tgen64_xori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1062 /* Perform the xor by parts. */
1063 if (val
& 0xffffffff) {
1064 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1066 if (val
> 0xffffffff) {
1067 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 31 >> 1);
1071 static int tgen_cmp(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg r1
,
1072 TCGArg c2
, int c2const
)
1074 bool is_unsigned
= is_unsigned_cond(c
);
1077 if (type
== TCG_TYPE_I32
) {
1078 tcg_out_insn(s
, RR
, LTR
, r1
, r1
);
1080 tcg_out_insn(s
, RRE
, LTGR
, r1
, r1
);
1082 return tcg_cond_to_ltr_cond
[c
];
1085 if (type
== TCG_TYPE_I32
) {
1086 tcg_out_insn(s
, RIL
, CLFI
, r1
, c2
);
1088 tcg_out_insn(s
, RIL
, CLGFI
, r1
, c2
);
1091 if (type
== TCG_TYPE_I32
) {
1092 tcg_out_insn(s
, RIL
, CFI
, r1
, c2
);
1094 tcg_out_insn(s
, RIL
, CGFI
, r1
, c2
);
1100 if (type
== TCG_TYPE_I32
) {
1101 tcg_out_insn(s
, RR
, CLR
, r1
, c2
);
1103 tcg_out_insn(s
, RRE
, CLGR
, r1
, c2
);
1106 if (type
== TCG_TYPE_I32
) {
1107 tcg_out_insn(s
, RR
, CR
, r1
, c2
);
1109 tcg_out_insn(s
, RRE
, CGR
, r1
, c2
);
1113 return tcg_cond_to_s390_cond
[c
];
1116 static void tgen_setcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1117 TCGReg dest
, TCGReg c1
, TCGArg c2
, int c2const
)
1119 int cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1121 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1122 tcg_out_movi(s
, type
, dest
, 1);
1123 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1124 tcg_out_movi(s
, type
, dest
, 0);
1127 static void tgen_movcond(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg dest
,
1128 TCGReg c1
, TCGArg c2
, int c2const
, TCGReg r3
)
1131 if (facilities
& FACILITY_LOAD_ON_COND
) {
1132 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1133 tcg_out_insn(s
, RRF
, LOCGR
, dest
, r3
, cc
);
1135 c
= tcg_invert_cond(c
);
1136 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1138 /* Emit: if (cc) goto over; dest = r3; over: */
1139 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1140 tcg_out_insn(s
, RRE
, LGR
, dest
, r3
);
1144 bool tcg_target_deposit_valid(int ofs
, int len
)
1146 return (facilities
& FACILITY_GEN_INST_EXT
) != 0;
1149 static void tgen_deposit(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1152 int lsb
= (63 - ofs
);
1153 int msb
= lsb
- (len
- 1);
1154 tcg_out_risbg(s
, dest
, src
, msb
, lsb
, ofs
, 0);
1157 static void tgen_gotoi(TCGContext
*s
, int cc
, tcg_target_long dest
)
1159 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1160 if (off
> -0x8000 && off
< 0x7fff) {
1161 tcg_out_insn(s
, RI
, BRC
, cc
, off
);
1162 } else if (off
== (int32_t)off
) {
1163 tcg_out_insn(s
, RIL
, BRCL
, cc
, off
);
1165 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1166 tcg_out_insn(s
, RR
, BCR
, cc
, TCG_TMP0
);
1170 static void tgen_branch(TCGContext
*s
, int cc
, int labelno
)
1172 TCGLabel
* l
= &s
->labels
[labelno
];
1174 tgen_gotoi(s
, cc
, l
->u
.value
);
1175 } else if (USE_LONG_BRANCHES
) {
1176 tcg_out16(s
, RIL_BRCL
| (cc
<< 4));
1177 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC32DBL
, labelno
, -2);
1180 tcg_out16(s
, RI_BRC
| (cc
<< 4));
1181 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC16DBL
, labelno
, -2);
1186 static void tgen_compare_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1187 TCGReg r1
, TCGReg r2
, int labelno
)
1189 TCGLabel
* l
= &s
->labels
[labelno
];
1190 tcg_target_long off
;
1193 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1195 /* We need to keep the offset unchanged for retranslation. */
1196 off
= ((int16_t *)s
->code_ptr
)[1];
1197 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1200 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | r2
);
1202 tcg_out16(s
, cc
<< 12 | (opc
& 0xff));
1205 static void tgen_compare_imm_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1206 TCGReg r1
, int i2
, int labelno
)
1208 TCGLabel
* l
= &s
->labels
[labelno
];
1209 tcg_target_long off
;
1212 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1214 /* We need to keep the offset unchanged for retranslation. */
1215 off
= ((int16_t *)s
->code_ptr
)[1];
1216 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1219 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | cc
);
1221 tcg_out16(s
, (i2
<< 8) | (opc
& 0xff));
1224 static void tgen_brcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1225 TCGReg r1
, TCGArg c2
, int c2const
, int labelno
)
1229 if (facilities
& FACILITY_GEN_INST_EXT
) {
1230 bool is_unsigned
= is_unsigned_cond(c
);
1234 cc
= tcg_cond_to_s390_cond
[c
];
1237 opc
= (type
== TCG_TYPE_I32
1238 ? (is_unsigned
? RIE_CLRJ
: RIE_CRJ
)
1239 : (is_unsigned
? RIE_CLGRJ
: RIE_CGRJ
));
1240 tgen_compare_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1244 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1245 If the immediate we've been given does not fit that range, we'll
1246 fall back to separate compare and branch instructions using the
1247 larger comparison range afforded by COMPARE IMMEDIATE. */
1248 if (type
== TCG_TYPE_I32
) {
1251 in_range
= (uint32_t)c2
== (uint8_t)c2
;
1254 in_range
= (int32_t)c2
== (int8_t)c2
;
1259 in_range
= (uint64_t)c2
== (uint8_t)c2
;
1262 in_range
= (int64_t)c2
== (int8_t)c2
;
1266 tgen_compare_imm_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1271 cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
);
1272 tgen_branch(s
, cc
, labelno
);
1275 static void tgen_calli(TCGContext
*s
, tcg_target_long dest
)
1277 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1278 if (off
== (int32_t)off
) {
1279 tcg_out_insn(s
, RIL
, BRASL
, TCG_REG_R14
, off
);
1281 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1282 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, TCG_TMP0
);
1286 static void tcg_out_qemu_ld_direct(TCGContext
*s
, int opc
, TCGReg data
,
1287 TCGReg base
, TCGReg index
, int disp
)
1289 #ifdef TARGET_WORDS_BIGENDIAN
1290 const int bswap
= 0;
1292 const int bswap
= 1;
1296 tcg_out_insn(s
, RXY
, LLGC
, data
, base
, index
, disp
);
1299 tcg_out_insn(s
, RXY
, LGB
, data
, base
, index
, disp
);
1303 /* swapped unsigned halfword load with upper bits zeroed */
1304 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1305 tgen_ext16u(s
, TCG_TYPE_I64
, data
, data
);
1307 tcg_out_insn(s
, RXY
, LLGH
, data
, base
, index
, disp
);
1312 /* swapped sign-extended halfword load */
1313 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1314 tgen_ext16s(s
, TCG_TYPE_I64
, data
, data
);
1316 tcg_out_insn(s
, RXY
, LGH
, data
, base
, index
, disp
);
1321 /* swapped unsigned int load with upper bits zeroed */
1322 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1323 tgen_ext32u(s
, data
, data
);
1325 tcg_out_insn(s
, RXY
, LLGF
, data
, base
, index
, disp
);
1330 /* swapped sign-extended int load */
1331 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1332 tgen_ext32s(s
, data
, data
);
1334 tcg_out_insn(s
, RXY
, LGF
, data
, base
, index
, disp
);
1339 tcg_out_insn(s
, RXY
, LRVG
, data
, base
, index
, disp
);
1341 tcg_out_insn(s
, RXY
, LG
, data
, base
, index
, disp
);
1349 static void tcg_out_qemu_st_direct(TCGContext
*s
, int opc
, TCGReg data
,
1350 TCGReg base
, TCGReg index
, int disp
)
1352 #ifdef TARGET_WORDS_BIGENDIAN
1353 const int bswap
= 0;
1355 const int bswap
= 1;
1359 if (disp
>= 0 && disp
< 0x1000) {
1360 tcg_out_insn(s
, RX
, STC
, data
, base
, index
, disp
);
1362 tcg_out_insn(s
, RXY
, STCY
, data
, base
, index
, disp
);
1367 tcg_out_insn(s
, RXY
, STRVH
, data
, base
, index
, disp
);
1368 } else if (disp
>= 0 && disp
< 0x1000) {
1369 tcg_out_insn(s
, RX
, STH
, data
, base
, index
, disp
);
1371 tcg_out_insn(s
, RXY
, STHY
, data
, base
, index
, disp
);
1376 tcg_out_insn(s
, RXY
, STRV
, data
, base
, index
, disp
);
1377 } else if (disp
>= 0 && disp
< 0x1000) {
1378 tcg_out_insn(s
, RX
, ST
, data
, base
, index
, disp
);
1380 tcg_out_insn(s
, RXY
, STY
, data
, base
, index
, disp
);
1385 tcg_out_insn(s
, RXY
, STRVG
, data
, base
, index
, disp
);
1387 tcg_out_insn(s
, RXY
, STG
, data
, base
, index
, disp
);
1395 #if defined(CONFIG_SOFTMMU)
1396 static TCGReg
tcg_prepare_qemu_ldst(TCGContext
* s
, TCGReg data_reg
,
1397 TCGReg addr_reg
, int mem_index
, int opc
,
1398 uint16_t **label2_ptr_p
, int is_store
)
1400 const TCGReg arg0
= tcg_target_call_iarg_regs
[0];
1401 const TCGReg arg1
= tcg_target_call_iarg_regs
[1];
1402 const TCGReg arg2
= tcg_target_call_iarg_regs
[2];
1403 const TCGReg arg3
= tcg_target_call_iarg_regs
[3];
1404 int s_bits
= opc
& 3;
1405 uint16_t *label1_ptr
;
1406 tcg_target_long ofs
;
1408 if (TARGET_LONG_BITS
== 32) {
1409 tgen_ext32u(s
, arg1
, addr_reg
);
1411 tcg_out_mov(s
, TCG_TYPE_I64
, arg1
, addr_reg
);
1414 tcg_out_sh64(s
, RSY_SRLG
, arg2
, addr_reg
, TCG_REG_NONE
,
1415 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1417 tgen_andi(s
, TCG_TYPE_I64
, arg1
, TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
1418 tgen_andi(s
, TCG_TYPE_I64
, arg2
, (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
1421 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
);
1423 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
);
1425 assert(ofs
< 0x80000);
1427 if (TARGET_LONG_BITS
== 32) {
1428 tcg_out_mem(s
, RX_C
, RXY_CY
, arg1
, arg2
, TCG_AREG0
, ofs
);
1430 tcg_out_mem(s
, 0, RXY_CG
, arg1
, arg2
, TCG_AREG0
, ofs
);
1433 if (TARGET_LONG_BITS
== 32) {
1434 tgen_ext32u(s
, arg1
, addr_reg
);
1436 tcg_out_mov(s
, TCG_TYPE_I64
, arg1
, addr_reg
);
1439 label1_ptr
= (uint16_t*)s
->code_ptr
;
1441 /* je label1 (offset will be patched in later) */
1442 tcg_out_insn(s
, RI
, BRC
, S390_CC_EQ
, 0);
1444 /* call load/store helper */
1446 /* Make sure to zero-extend the value to the full register
1447 for the calling convention. */
1450 tgen_ext8u(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1453 tgen_ext16u(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1456 tgen_ext32u(s
, arg2
, data_reg
);
1459 tcg_out_mov(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1464 tcg_out_movi(s
, TCG_TYPE_I32
, arg3
, mem_index
);
1465 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, TCG_AREG0
);
1466 tgen_calli(s
, (tcg_target_ulong
)qemu_st_helpers
[s_bits
]);
1468 tcg_out_movi(s
, TCG_TYPE_I32
, arg2
, mem_index
);
1469 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, TCG_AREG0
);
1470 tgen_calli(s
, (tcg_target_ulong
)qemu_ld_helpers
[s_bits
]);
1472 /* sign extension */
1475 tgen_ext8s(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1478 tgen_ext16s(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1481 tgen_ext32s(s
, data_reg
, TCG_REG_R2
);
1484 /* unsigned -> just copy */
1485 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1490 /* jump to label2 (end) */
1491 *label2_ptr_p
= (uint16_t*)s
->code_ptr
;
1493 tcg_out_insn(s
, RI
, BRC
, S390_CC_ALWAYS
, 0);
1495 /* this is label1, patch branch */
1496 *(label1_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1497 (unsigned long)label1_ptr
) >> 1;
1499 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1500 assert(ofs
< 0x80000);
1502 tcg_out_mem(s
, 0, RXY_AG
, arg1
, arg2
, TCG_AREG0
, ofs
);
1507 static void tcg_finish_qemu_ldst(TCGContext
* s
, uint16_t *label2_ptr
)
1510 *(label2_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1511 (unsigned long)label2_ptr
) >> 1;
1514 static void tcg_prepare_user_ldst(TCGContext
*s
, TCGReg
*addr_reg
,
1515 TCGReg
*index_reg
, tcg_target_long
*disp
)
1517 if (TARGET_LONG_BITS
== 32) {
1518 tgen_ext32u(s
, TCG_TMP0
, *addr_reg
);
1519 *addr_reg
= TCG_TMP0
;
1521 if (GUEST_BASE
< 0x80000) {
1522 *index_reg
= TCG_REG_NONE
;
1525 *index_reg
= TCG_GUEST_BASE_REG
;
1529 #endif /* CONFIG_SOFTMMU */
1531 /* load data with address translation (if applicable)
1532 and endianness conversion */
1533 static void tcg_out_qemu_ld(TCGContext
* s
, const TCGArg
* args
, int opc
)
1535 TCGReg addr_reg
, data_reg
;
1536 #if defined(CONFIG_SOFTMMU)
1538 uint16_t *label2_ptr
;
1541 tcg_target_long disp
;
1547 #if defined(CONFIG_SOFTMMU)
1550 addr_reg
= tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1551 opc
, &label2_ptr
, 0);
1553 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, TCG_REG_NONE
, 0);
1555 tcg_finish_qemu_ldst(s
, label2_ptr
);
1557 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1558 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1562 static void tcg_out_qemu_st(TCGContext
* s
, const TCGArg
* args
, int opc
)
1564 TCGReg addr_reg
, data_reg
;
1565 #if defined(CONFIG_SOFTMMU)
1567 uint16_t *label2_ptr
;
1570 tcg_target_long disp
;
1576 #if defined(CONFIG_SOFTMMU)
1579 addr_reg
= tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1580 opc
, &label2_ptr
, 1);
1582 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, TCG_REG_NONE
, 0);
1584 tcg_finish_qemu_ldst(s
, label2_ptr
);
1586 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1587 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
/* Expand to a pair of case labels that match both the 32-bit and the
   64-bit variant of TCG opcode 'x'; the colon for the second label is
   supplied at the use site, e.g. OP_32_64(ld8u):  */
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
1595 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1596 const TCGArg
*args
, const int *const_args
)
1602 case INDEX_op_exit_tb
:
1604 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, args
[0]);
1605 tgen_gotoi(s
, S390_CC_ALWAYS
, (unsigned long)tb_ret_addr
);
1608 case INDEX_op_goto_tb
:
1609 if (s
->tb_jmp_offset
) {
1612 /* load address stored at s->tb_next + args[0] */
1613 tcg_out_ld_abs(s
, TCG_TYPE_PTR
, TCG_TMP0
, s
->tb_next
+ args
[0]);
1615 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_TMP0
);
1617 s
->tb_next_offset
[args
[0]] = s
->code_ptr
- s
->code_buf
;
1621 if (const_args
[0]) {
1622 tgen_calli(s
, args
[0]);
1624 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, args
[0]);
1628 case INDEX_op_mov_i32
:
1629 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1631 case INDEX_op_movi_i32
:
1632 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1636 /* ??? LLC (RXY format) is only present with the extended-immediate
1637 facility, whereas LLGC is always present. */
1638 tcg_out_mem(s
, 0, RXY_LLGC
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1642 /* ??? LB is no smaller than LGB, so no point to using it. */
1643 tcg_out_mem(s
, 0, RXY_LGB
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1647 /* ??? LLH (RXY format) is only present with the extended-immediate
1648 facility, whereas LLGH is always present. */
1649 tcg_out_mem(s
, 0, RXY_LLGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1652 case INDEX_op_ld16s_i32
:
1653 tcg_out_mem(s
, RX_LH
, RXY_LHY
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1656 case INDEX_op_ld_i32
:
1657 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1661 tcg_out_mem(s
, RX_STC
, RXY_STCY
, args
[0], args
[1],
1662 TCG_REG_NONE
, args
[2]);
1666 tcg_out_mem(s
, RX_STH
, RXY_STHY
, args
[0], args
[1],
1667 TCG_REG_NONE
, args
[2]);
1670 case INDEX_op_st_i32
:
1671 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1674 case INDEX_op_add_i32
:
1675 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1676 if (const_args
[2]) {
1679 if (a2
== (int16_t)a2
) {
1680 tcg_out_insn(s
, RI
, AHI
, a0
, a2
);
1683 if (facilities
& FACILITY_EXT_IMM
) {
1684 tcg_out_insn(s
, RIL
, AFI
, a0
, a2
);
1688 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1689 } else if (a0
== a1
) {
1690 tcg_out_insn(s
, RR
, AR
, a0
, a2
);
1692 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1695 case INDEX_op_sub_i32
:
1696 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1697 if (const_args
[2]) {
1701 tcg_out_insn(s
, RR
, SR
, args
[0], args
[2]);
1704 case INDEX_op_and_i32
:
1705 if (const_args
[2]) {
1706 tgen_andi(s
, TCG_TYPE_I32
, args
[0], args
[2]);
1708 tcg_out_insn(s
, RR
, NR
, args
[0], args
[2]);
1711 case INDEX_op_or_i32
:
1712 if (const_args
[2]) {
1713 tgen64_ori(s
, args
[0], args
[2] & 0xffffffff);
1715 tcg_out_insn(s
, RR
, OR
, args
[0], args
[2]);
1718 case INDEX_op_xor_i32
:
1719 if (const_args
[2]) {
1720 tgen64_xori(s
, args
[0], args
[2] & 0xffffffff);
1722 tcg_out_insn(s
, RR
, XR
, args
[0], args
[2]);
1726 case INDEX_op_neg_i32
:
1727 tcg_out_insn(s
, RR
, LCR
, args
[0], args
[1]);
1730 case INDEX_op_mul_i32
:
1731 if (const_args
[2]) {
1732 if ((int32_t)args
[2] == (int16_t)args
[2]) {
1733 tcg_out_insn(s
, RI
, MHI
, args
[0], args
[2]);
1735 tcg_out_insn(s
, RIL
, MSFI
, args
[0], args
[2]);
1738 tcg_out_insn(s
, RRE
, MSR
, args
[0], args
[2]);
1742 case INDEX_op_div2_i32
:
1743 tcg_out_insn(s
, RR
, DR
, TCG_REG_R2
, args
[4]);
1745 case INDEX_op_divu2_i32
:
1746 tcg_out_insn(s
, RRE
, DLR
, TCG_REG_R2
, args
[4]);
1749 case INDEX_op_shl_i32
:
1752 if (const_args
[2]) {
1753 tcg_out_sh32(s
, op
, args
[0], TCG_REG_NONE
, args
[2]);
1755 tcg_out_sh32(s
, op
, args
[0], args
[2], 0);
1758 case INDEX_op_shr_i32
:
1761 case INDEX_op_sar_i32
:
1765 case INDEX_op_rotl_i32
:
1766 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1767 if (const_args
[2]) {
1768 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1770 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], args
[2], 0);
1773 case INDEX_op_rotr_i32
:
1774 if (const_args
[2]) {
1775 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1],
1776 TCG_REG_NONE
, (32 - args
[2]) & 31);
1778 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
1779 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_TMP0
, 0);
1783 case INDEX_op_ext8s_i32
:
1784 tgen_ext8s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1786 case INDEX_op_ext16s_i32
:
1787 tgen_ext16s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1789 case INDEX_op_ext8u_i32
:
1790 tgen_ext8u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1792 case INDEX_op_ext16u_i32
:
1793 tgen_ext16u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1797 /* The TCG bswap definition requires bits 0-47 already be zero.
1798 Thus we don't need the G-type insns to implement bswap16_i64. */
1799 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1800 tcg_out_sh32(s
, RS_SRL
, args
[0], TCG_REG_NONE
, 16);
1803 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1806 case INDEX_op_add2_i32
:
1807 /* ??? Make use of ALFI. */
1808 tcg_out_insn(s
, RR
, ALR
, args
[0], args
[4]);
1809 tcg_out_insn(s
, RRE
, ALCR
, args
[1], args
[5]);
1811 case INDEX_op_sub2_i32
:
1812 /* ??? Make use of SLFI. */
1813 tcg_out_insn(s
, RR
, SLR
, args
[0], args
[4]);
1814 tcg_out_insn(s
, RRE
, SLBR
, args
[1], args
[5]);
1818 tgen_branch(s
, S390_CC_ALWAYS
, args
[0]);
1821 case INDEX_op_brcond_i32
:
1822 tgen_brcond(s
, TCG_TYPE_I32
, args
[2], args
[0],
1823 args
[1], const_args
[1], args
[3]);
1825 case INDEX_op_setcond_i32
:
1826 tgen_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1],
1827 args
[2], const_args
[2]);
1829 case INDEX_op_movcond_i32
:
1830 tgen_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1],
1831 args
[2], const_args
[2], args
[3]);
1834 case INDEX_op_qemu_ld8u
:
1835 tcg_out_qemu_ld(s
, args
, LD_UINT8
);
1837 case INDEX_op_qemu_ld8s
:
1838 tcg_out_qemu_ld(s
, args
, LD_INT8
);
1840 case INDEX_op_qemu_ld16u
:
1841 tcg_out_qemu_ld(s
, args
, LD_UINT16
);
1843 case INDEX_op_qemu_ld16s
:
1844 tcg_out_qemu_ld(s
, args
, LD_INT16
);
1846 case INDEX_op_qemu_ld32
:
1847 /* ??? Technically we can use a non-extending instruction. */
1848 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
1850 case INDEX_op_qemu_ld64
:
1851 tcg_out_qemu_ld(s
, args
, LD_UINT64
);
1854 case INDEX_op_qemu_st8
:
1855 tcg_out_qemu_st(s
, args
, LD_UINT8
);
1857 case INDEX_op_qemu_st16
:
1858 tcg_out_qemu_st(s
, args
, LD_UINT16
);
1860 case INDEX_op_qemu_st32
:
1861 tcg_out_qemu_st(s
, args
, LD_UINT32
);
1863 case INDEX_op_qemu_st64
:
1864 tcg_out_qemu_st(s
, args
, LD_UINT64
);
1867 case INDEX_op_mov_i64
:
1868 tcg_out_mov(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1870 case INDEX_op_movi_i64
:
1871 tcg_out_movi(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1874 case INDEX_op_ld16s_i64
:
1875 tcg_out_mem(s
, 0, RXY_LGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1877 case INDEX_op_ld32u_i64
:
1878 tcg_out_mem(s
, 0, RXY_LLGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1880 case INDEX_op_ld32s_i64
:
1881 tcg_out_mem(s
, 0, RXY_LGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1883 case INDEX_op_ld_i64
:
1884 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1887 case INDEX_op_st32_i64
:
1888 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1890 case INDEX_op_st_i64
:
1891 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1894 case INDEX_op_add_i64
:
1895 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1896 if (const_args
[2]) {
1899 if (a2
== (int16_t)a2
) {
1900 tcg_out_insn(s
, RI
, AGHI
, a0
, a2
);
1903 if (facilities
& FACILITY_EXT_IMM
) {
1904 if (a2
== (int32_t)a2
) {
1905 tcg_out_insn(s
, RIL
, AGFI
, a0
, a2
);
1907 } else if (a2
== (uint32_t)a2
) {
1908 tcg_out_insn(s
, RIL
, ALGFI
, a0
, a2
);
1910 } else if (-a2
== (uint32_t)-a2
) {
1911 tcg_out_insn(s
, RIL
, SLGFI
, a0
, -a2
);
1916 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1917 } else if (a0
== a1
) {
1918 tcg_out_insn(s
, RRE
, AGR
, a0
, a2
);
1920 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1923 case INDEX_op_sub_i64
:
1924 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1925 if (const_args
[2]) {
1929 tcg_out_insn(s
, RRE
, SGR
, args
[0], args
[2]);
1933 case INDEX_op_and_i64
:
1934 if (const_args
[2]) {
1935 tgen_andi(s
, TCG_TYPE_I64
, args
[0], args
[2]);
1937 tcg_out_insn(s
, RRE
, NGR
, args
[0], args
[2]);
1940 case INDEX_op_or_i64
:
1941 if (const_args
[2]) {
1942 tgen64_ori(s
, args
[0], args
[2]);
1944 tcg_out_insn(s
, RRE
, OGR
, args
[0], args
[2]);
1947 case INDEX_op_xor_i64
:
1948 if (const_args
[2]) {
1949 tgen64_xori(s
, args
[0], args
[2]);
1951 tcg_out_insn(s
, RRE
, XGR
, args
[0], args
[2]);
1955 case INDEX_op_neg_i64
:
1956 tcg_out_insn(s
, RRE
, LCGR
, args
[0], args
[1]);
1958 case INDEX_op_bswap64_i64
:
1959 tcg_out_insn(s
, RRE
, LRVGR
, args
[0], args
[1]);
1962 case INDEX_op_mul_i64
:
1963 if (const_args
[2]) {
1964 if (args
[2] == (int16_t)args
[2]) {
1965 tcg_out_insn(s
, RI
, MGHI
, args
[0], args
[2]);
1967 tcg_out_insn(s
, RIL
, MSGFI
, args
[0], args
[2]);
1970 tcg_out_insn(s
, RRE
, MSGR
, args
[0], args
[2]);
1974 case INDEX_op_div2_i64
:
1975 /* ??? We get an unnecessary sign-extension of the dividend
1976 into R3 with this definition, but as we do in fact always
1977 produce both quotient and remainder using INDEX_op_div_i64
1978 instead requires jumping through even more hoops. */
1979 tcg_out_insn(s
, RRE
, DSGR
, TCG_REG_R2
, args
[4]);
1981 case INDEX_op_divu2_i64
:
1982 tcg_out_insn(s
, RRE
, DLGR
, TCG_REG_R2
, args
[4]);
1984 case INDEX_op_mulu2_i64
:
1985 tcg_out_insn(s
, RRE
, MLGR
, TCG_REG_R2
, args
[3]);
1988 case INDEX_op_shl_i64
:
1991 if (const_args
[2]) {
1992 tcg_out_sh64(s
, op
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1994 tcg_out_sh64(s
, op
, args
[0], args
[1], args
[2], 0);
1997 case INDEX_op_shr_i64
:
2000 case INDEX_op_sar_i64
:
2004 case INDEX_op_rotl_i64
:
2005 if (const_args
[2]) {
2006 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2007 TCG_REG_NONE
, args
[2]);
2009 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], args
[2], 0);
2012 case INDEX_op_rotr_i64
:
2013 if (const_args
[2]) {
2014 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2015 TCG_REG_NONE
, (64 - args
[2]) & 63);
2017 /* We can use the smaller 32-bit negate because only the
2018 low 6 bits are examined for the rotate. */
2019 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
2020 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], TCG_TMP0
, 0);
2024 case INDEX_op_ext8s_i64
:
2025 tgen_ext8s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2027 case INDEX_op_ext16s_i64
:
2028 tgen_ext16s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2030 case INDEX_op_ext32s_i64
:
2031 tgen_ext32s(s
, args
[0], args
[1]);
2033 case INDEX_op_ext8u_i64
:
2034 tgen_ext8u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2036 case INDEX_op_ext16u_i64
:
2037 tgen_ext16u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2039 case INDEX_op_ext32u_i64
:
2040 tgen_ext32u(s
, args
[0], args
[1]);
2043 case INDEX_op_add2_i64
:
2044 /* ??? Make use of ALGFI and SLGFI. */
2045 tcg_out_insn(s
, RRE
, ALGR
, args
[0], args
[4]);
2046 tcg_out_insn(s
, RRE
, ALCGR
, args
[1], args
[5]);
2048 case INDEX_op_sub2_i64
:
2049 /* ??? Make use of ALGFI and SLGFI. */
2050 tcg_out_insn(s
, RRE
, SLGR
, args
[0], args
[4]);
2051 tcg_out_insn(s
, RRE
, SLBGR
, args
[1], args
[5]);
2054 case INDEX_op_brcond_i64
:
2055 tgen_brcond(s
, TCG_TYPE_I64
, args
[2], args
[0],
2056 args
[1], const_args
[1], args
[3]);
2058 case INDEX_op_setcond_i64
:
2059 tgen_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1],
2060 args
[2], const_args
[2]);
2062 case INDEX_op_movcond_i64
:
2063 tgen_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1],
2064 args
[2], const_args
[2], args
[3]);
2067 case INDEX_op_qemu_ld32u
:
2068 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
2070 case INDEX_op_qemu_ld32s
:
2071 tcg_out_qemu_ld(s
, args
, LD_INT32
);
2075 tgen_deposit(s
, args
[0], args
[2], args
[3], args
[4]);
2079 fprintf(stderr
,"unimplemented opc 0x%x\n",opc
);
2084 static const TCGTargetOpDef s390_op_defs
[] = {
2085 { INDEX_op_exit_tb
, { } },
2086 { INDEX_op_goto_tb
, { } },
2087 { INDEX_op_call
, { "ri" } },
2088 { INDEX_op_br
, { } },
2090 { INDEX_op_mov_i32
, { "r", "r" } },
2091 { INDEX_op_movi_i32
, { "r" } },
2093 { INDEX_op_ld8u_i32
, { "r", "r" } },
2094 { INDEX_op_ld8s_i32
, { "r", "r" } },
2095 { INDEX_op_ld16u_i32
, { "r", "r" } },
2096 { INDEX_op_ld16s_i32
, { "r", "r" } },
2097 { INDEX_op_ld_i32
, { "r", "r" } },
2098 { INDEX_op_st8_i32
, { "r", "r" } },
2099 { INDEX_op_st16_i32
, { "r", "r" } },
2100 { INDEX_op_st_i32
, { "r", "r" } },
2102 { INDEX_op_add_i32
, { "r", "r", "ri" } },
2103 { INDEX_op_sub_i32
, { "r", "0", "ri" } },
2104 { INDEX_op_mul_i32
, { "r", "0", "rK" } },
2106 { INDEX_op_div2_i32
, { "b", "a", "0", "1", "r" } },
2107 { INDEX_op_divu2_i32
, { "b", "a", "0", "1", "r" } },
2109 { INDEX_op_and_i32
, { "r", "0", "ri" } },
2110 { INDEX_op_or_i32
, { "r", "0", "rO" } },
2111 { INDEX_op_xor_i32
, { "r", "0", "rX" } },
2113 { INDEX_op_neg_i32
, { "r", "r" } },
2115 { INDEX_op_shl_i32
, { "r", "0", "Ri" } },
2116 { INDEX_op_shr_i32
, { "r", "0", "Ri" } },
2117 { INDEX_op_sar_i32
, { "r", "0", "Ri" } },
2119 { INDEX_op_rotl_i32
, { "r", "r", "Ri" } },
2120 { INDEX_op_rotr_i32
, { "r", "r", "Ri" } },
2122 { INDEX_op_ext8s_i32
, { "r", "r" } },
2123 { INDEX_op_ext8u_i32
, { "r", "r" } },
2124 { INDEX_op_ext16s_i32
, { "r", "r" } },
2125 { INDEX_op_ext16u_i32
, { "r", "r" } },
2127 { INDEX_op_bswap16_i32
, { "r", "r" } },
2128 { INDEX_op_bswap32_i32
, { "r", "r" } },
2130 { INDEX_op_add2_i32
, { "r", "r", "0", "1", "r", "r" } },
2131 { INDEX_op_sub2_i32
, { "r", "r", "0", "1", "r", "r" } },
2133 { INDEX_op_brcond_i32
, { "r", "rC" } },
2134 { INDEX_op_setcond_i32
, { "r", "r", "rC" } },
2135 { INDEX_op_movcond_i32
, { "r", "r", "rC", "r", "0" } },
2136 { INDEX_op_deposit_i32
, { "r", "0", "r" } },
2138 { INDEX_op_qemu_ld8u
, { "r", "L" } },
2139 { INDEX_op_qemu_ld8s
, { "r", "L" } },
2140 { INDEX_op_qemu_ld16u
, { "r", "L" } },
2141 { INDEX_op_qemu_ld16s
, { "r", "L" } },
2142 { INDEX_op_qemu_ld32
, { "r", "L" } },
2143 { INDEX_op_qemu_ld64
, { "r", "L" } },
2145 { INDEX_op_qemu_st8
, { "L", "L" } },
2146 { INDEX_op_qemu_st16
, { "L", "L" } },
2147 { INDEX_op_qemu_st32
, { "L", "L" } },
2148 { INDEX_op_qemu_st64
, { "L", "L" } },
2150 { INDEX_op_mov_i64
, { "r", "r" } },
2151 { INDEX_op_movi_i64
, { "r" } },
2153 { INDEX_op_ld8u_i64
, { "r", "r" } },
2154 { INDEX_op_ld8s_i64
, { "r", "r" } },
2155 { INDEX_op_ld16u_i64
, { "r", "r" } },
2156 { INDEX_op_ld16s_i64
, { "r", "r" } },
2157 { INDEX_op_ld32u_i64
, { "r", "r" } },
2158 { INDEX_op_ld32s_i64
, { "r", "r" } },
2159 { INDEX_op_ld_i64
, { "r", "r" } },
2161 { INDEX_op_st8_i64
, { "r", "r" } },
2162 { INDEX_op_st16_i64
, { "r", "r" } },
2163 { INDEX_op_st32_i64
, { "r", "r" } },
2164 { INDEX_op_st_i64
, { "r", "r" } },
2166 { INDEX_op_add_i64
, { "r", "r", "ri" } },
2167 { INDEX_op_sub_i64
, { "r", "0", "ri" } },
2168 { INDEX_op_mul_i64
, { "r", "0", "rK" } },
2170 { INDEX_op_div2_i64
, { "b", "a", "0", "1", "r" } },
2171 { INDEX_op_divu2_i64
, { "b", "a", "0", "1", "r" } },
2172 { INDEX_op_mulu2_i64
, { "b", "a", "0", "r" } },
2174 { INDEX_op_and_i64
, { "r", "0", "ri" } },
2175 { INDEX_op_or_i64
, { "r", "0", "rO" } },
2176 { INDEX_op_xor_i64
, { "r", "0", "rX" } },
2178 { INDEX_op_neg_i64
, { "r", "r" } },
2180 { INDEX_op_shl_i64
, { "r", "r", "Ri" } },
2181 { INDEX_op_shr_i64
, { "r", "r", "Ri" } },
2182 { INDEX_op_sar_i64
, { "r", "r", "Ri" } },
2184 { INDEX_op_rotl_i64
, { "r", "r", "Ri" } },
2185 { INDEX_op_rotr_i64
, { "r", "r", "Ri" } },
2187 { INDEX_op_ext8s_i64
, { "r", "r" } },
2188 { INDEX_op_ext8u_i64
, { "r", "r" } },
2189 { INDEX_op_ext16s_i64
, { "r", "r" } },
2190 { INDEX_op_ext16u_i64
, { "r", "r" } },
2191 { INDEX_op_ext32s_i64
, { "r", "r" } },
2192 { INDEX_op_ext32u_i64
, { "r", "r" } },
2194 { INDEX_op_bswap16_i64
, { "r", "r" } },
2195 { INDEX_op_bswap32_i64
, { "r", "r" } },
2196 { INDEX_op_bswap64_i64
, { "r", "r" } },
2198 { INDEX_op_add2_i64
, { "r", "r", "0", "1", "r", "r" } },
2199 { INDEX_op_sub2_i64
, { "r", "r", "0", "1", "r", "r" } },
2201 { INDEX_op_brcond_i64
, { "r", "rC" } },
2202 { INDEX_op_setcond_i64
, { "r", "r", "rC" } },
2203 { INDEX_op_movcond_i64
, { "r", "r", "rC", "r", "0" } },
2204 { INDEX_op_deposit_i64
, { "r", "0", "r" } },
2206 { INDEX_op_qemu_ld32u
, { "r", "L" } },
2207 { INDEX_op_qemu_ld32s
, { "r", "L" } },
2212 static void query_facilities(void)
2214 unsigned long hwcap
= qemu_getauxval(AT_HWCAP
);
2216 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2217 is present on all 64-bit systems, but let's check for it anyway. */
2218 if (hwcap
& HWCAP_S390_STFLE
) {
2219 register int r0
__asm__("0");
2220 register void *r1
__asm__("1");
2224 asm volatile(".word 0xb2b0,0x1000"
2225 : "=r"(r0
) : "0"(0), "r"(r1
) : "memory", "cc");
2229 static void tcg_target_init(TCGContext
*s
)
2233 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffff);
2234 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, 0xffff);
2236 tcg_regset_clear(tcg_target_call_clobber_regs
);
2237 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2238 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2239 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2240 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2241 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R4
);
2242 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R5
);
2243 /* The return register can be considered call-clobbered. */
2244 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2246 tcg_regset_clear(s
->reserved_regs
);
2247 tcg_regset_set_reg(s
->reserved_regs
, TCG_TMP0
);
2248 /* XXX many insns can't be used with R0, so we better avoid it for now */
2249 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_R0
);
2250 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2252 tcg_add_target_add_op_defs(s390_op_defs
);
2255 static void tcg_target_qemu_prologue(TCGContext
*s
)
2257 tcg_target_long frame_size
;
2259 /* stmg %r6,%r15,48(%r15) (save registers) */
2260 tcg_out_insn(s
, RXY
, STMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 48);
2262 /* aghi %r15,-frame_size */
2263 frame_size
= TCG_TARGET_CALL_STACK_OFFSET
;
2264 frame_size
+= TCG_STATIC_CALL_ARGS_SIZE
;
2265 frame_size
+= CPU_TEMP_BUF_NLONGS
* sizeof(long);
2266 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_R15
, -frame_size
);
2268 tcg_set_frame(s
, TCG_REG_CALL_STACK
,
2269 TCG_STATIC_CALL_ARGS_SIZE
+ TCG_TARGET_CALL_STACK_OFFSET
,
2270 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2272 if (GUEST_BASE
>= 0x80000) {
2273 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, GUEST_BASE
);
2274 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
2277 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2278 /* br %r3 (go to TB) */
2279 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, tcg_target_call_iarg_regs
[1]);
2281 tb_ret_addr
= s
->code_ptr
;
2283 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2284 tcg_out_insn(s
, RXY
, LMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
,
2287 /* br %r14 (return) */
2288 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_R14
);