/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
27 #include "tcg-be-null.h"
29 /* We only support generating code for 64-bit mode. */
30 #if TCG_TARGET_REG_BITS != 64
31 #error "unsupported code generation mode"
34 /* ??? The translation blocks produced by TCG are generally small enough to
35 be entirely reachable with a 16-bit displacement. Leaving the option for
36 a 32-bit displacement here Just In Case. */
37 #define USE_LONG_BRANCHES 0
39 #define TCG_CT_CONST_32 0x0100
40 #define TCG_CT_CONST_MULI 0x0800
41 #define TCG_CT_CONST_ORI 0x2000
42 #define TCG_CT_CONST_XORI 0x4000
43 #define TCG_CT_CONST_CMPI 0x8000
45 /* Several places within the instruction set 0 means "no register"
46 rather than TCG_REG_R0. */
47 #define TCG_REG_NONE 0
/* A scratch register that may be used throughout the backend. */
50 #define TCG_TMP0 TCG_REG_R14
52 #ifdef CONFIG_USE_GUEST_BASE
53 #define TCG_GUEST_BASE_REG TCG_REG_R13
55 #define TCG_GUEST_BASE_REG TCG_REG_R0
63 /* All of the following instructions are prefixed with their instruction
64 format, and are defined as 8- or 16-bit quantities, even when the two
65 halves of the 16-bit quantity may appear 32 bits apart in the insn.
66 This makes it easy to copy the values from the tables in Appendix B. */
67 typedef enum S390Opcode
{
229 #define LD_SIGNED 0x04
230 #define LD_UINT8 0x00
231 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
232 #define LD_UINT16 0x01
233 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
234 #define LD_UINT32 0x02
235 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
236 #define LD_UINT64 0x03
237 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
240 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
241 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
242 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
246 /* Since R6 is a potential argument register, choose it last of the
247 call-saved registers. Likewise prefer the call-clobbered registers
248 in reverse order to maximize the chance of avoiding the arguments. */
249 static const int tcg_target_reg_alloc_order
[] = {
267 static const int tcg_target_call_iarg_regs
[] = {
275 static const int tcg_target_call_oarg_regs
[] = {
283 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
284 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
285 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
286 #define S390_CC_NEVER 0
287 #define S390_CC_ALWAYS 15
289 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
290 static const uint8_t tcg_cond_to_s390_cond
[] = {
291 [TCG_COND_EQ
] = S390_CC_EQ
,
292 [TCG_COND_NE
] = S390_CC_NE
,
293 [TCG_COND_LT
] = S390_CC_LT
,
294 [TCG_COND_LE
] = S390_CC_LE
,
295 [TCG_COND_GT
] = S390_CC_GT
,
296 [TCG_COND_GE
] = S390_CC_GE
,
297 [TCG_COND_LTU
] = S390_CC_LT
,
298 [TCG_COND_LEU
] = S390_CC_LE
,
299 [TCG_COND_GTU
] = S390_CC_GT
,
300 [TCG_COND_GEU
] = S390_CC_GE
,
303 /* Condition codes that result from a LOAD AND TEST. Here, we have no
304 unsigned instruction variation, however since the test is vs zero we
305 can re-map the outcomes appropriately. */
306 static const uint8_t tcg_cond_to_ltr_cond
[] = {
307 [TCG_COND_EQ
] = S390_CC_EQ
,
308 [TCG_COND_NE
] = S390_CC_NE
,
309 [TCG_COND_LT
] = S390_CC_LT
,
310 [TCG_COND_LE
] = S390_CC_LE
,
311 [TCG_COND_GT
] = S390_CC_GT
,
312 [TCG_COND_GE
] = S390_CC_GE
,
313 [TCG_COND_LTU
] = S390_CC_NEVER
,
314 [TCG_COND_LEU
] = S390_CC_EQ
,
315 [TCG_COND_GTU
] = S390_CC_NE
,
316 [TCG_COND_GEU
] = S390_CC_ALWAYS
,
319 #ifdef CONFIG_SOFTMMU
320 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
322 static const void * const qemu_ld_helpers
[4] = {
329 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
330 uintxx_t val, int mmu_idx) */
331 static const void * const qemu_st_helpers
[4] = {
339 static uint8_t *tb_ret_addr
;
341 /* A list of relevant facilities used by this translator. Some of these
342 are required for proper operation, and these are checked at startup. */
344 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
345 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
346 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
347 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
348 #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
350 static uint64_t facilities
;
352 static void patch_reloc(uint8_t *code_ptr
, int type
,
353 intptr_t value
, intptr_t addend
)
355 intptr_t code_ptr_tl
= (intptr_t)code_ptr
;
358 /* ??? Not the usual definition of "addend". */
359 pcrel2
= (value
- (code_ptr_tl
+ addend
)) >> 1;
363 assert(pcrel2
== (int16_t)pcrel2
);
364 *(int16_t *)code_ptr
= pcrel2
;
367 assert(pcrel2
== (int32_t)pcrel2
);
368 *(int32_t *)code_ptr
= pcrel2
;
376 /* parse target specific constraints */
377 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
379 const char *ct_str
= *pct_str
;
382 case 'r': /* all registers */
383 ct
->ct
|= TCG_CT_REG
;
384 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
386 case 'R': /* not R0 */
387 ct
->ct
|= TCG_CT_REG
;
388 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
389 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R0
);
391 case 'L': /* qemu_ld/st constraint */
392 ct
->ct
|= TCG_CT_REG
;
393 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
394 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R2
);
395 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R3
);
396 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R4
);
398 case 'a': /* force R2 for division */
399 ct
->ct
|= TCG_CT_REG
;
400 tcg_regset_clear(ct
->u
.regs
);
401 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R2
);
403 case 'b': /* force R3 for division */
404 ct
->ct
|= TCG_CT_REG
;
405 tcg_regset_clear(ct
->u
.regs
);
406 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R3
);
408 case 'W': /* force 32-bit ("word") immediate */
409 ct
->ct
|= TCG_CT_CONST_32
;
412 ct
->ct
|= TCG_CT_CONST_MULI
;
415 ct
->ct
|= TCG_CT_CONST_ORI
;
418 ct
->ct
|= TCG_CT_CONST_XORI
;
421 ct
->ct
|= TCG_CT_CONST_CMPI
;
432 /* Immediates to be used with logical OR. This is an optimization only,
433 since a full 64-bit immediate OR can always be performed with 4 sequential
434 OI[LH][LH] instructions. What we're looking for is immediates that we
435 can load efficiently, and the immediate load plus the reg-reg OR is
436 smaller than the sequential OI's. */
438 static int tcg_match_ori(int ct
, tcg_target_long val
)
440 if (facilities
& FACILITY_EXT_IMM
) {
441 if (ct
& TCG_CT_CONST_32
) {
442 /* All 32-bit ORs can be performed with 1 48-bit insn. */
447 /* Look for negative values. These are best to load with LGHI. */
449 if (val
== (int16_t)val
) {
452 if (facilities
& FACILITY_EXT_IMM
) {
453 if (val
== (int32_t)val
) {
462 /* Immediates to be used with logical XOR. This is almost, but not quite,
463 only an optimization. XOR with immediate is only supported with the
464 extended-immediate facility. That said, there are a few patterns for
465 which it is better to load the value into a register first. */
467 static int tcg_match_xori(int ct
, tcg_target_long val
)
469 if ((facilities
& FACILITY_EXT_IMM
) == 0) {
473 if (ct
& TCG_CT_CONST_32
) {
474 /* All 32-bit XORs can be performed with 1 48-bit insn. */
478 /* Look for negative values. These are best to load with LGHI. */
479 if (val
< 0 && val
== (int32_t)val
) {
/* Immediates to be used with comparisons. */
488 static int tcg_match_cmpi(int ct
, tcg_target_long val
)
490 if (facilities
& FACILITY_EXT_IMM
) {
491 /* The COMPARE IMMEDIATE instruction is available. */
492 if (ct
& TCG_CT_CONST_32
) {
493 /* We have a 32-bit immediate and can compare against anything. */
496 /* ??? We have no insight here into whether the comparison is
497 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
498 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
499 a 32-bit unsigned immediate. If we were to use the (semi)
500 obvious "val == (int32_t)val" we would be enabling unsigned
501 comparisons vs very large numbers. The only solution is to
502 take the intersection of the ranges. */
503 /* ??? Another possible solution is to simply lie and allow all
504 constants here and force the out-of-range values into a temp
505 register in tgen_cmp when we have knowledge of the actual
506 comparison code in use. */
507 return val
>= 0 && val
<= 0x7fffffff;
510 /* Only the LOAD AND TEST instruction is available. */
515 /* Test if a constant matches the constraint. */
516 static int tcg_target_const_match(tcg_target_long val
,
517 const TCGArgConstraint
*arg_ct
)
521 if (ct
& TCG_CT_CONST
) {
525 /* Handle the modifiers. */
526 if (ct
& TCG_CT_CONST_32
) {
530 /* The following are mutually exclusive. */
531 if (ct
& TCG_CT_CONST_MULI
) {
532 /* Immediates that may be used with multiply. If we have the
533 general-instruction-extensions, then we have MULTIPLY SINGLE
534 IMMEDIATE with a signed 32-bit, otherwise we have only
535 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
536 if (facilities
& FACILITY_GEN_INST_EXT
) {
537 return val
== (int32_t)val
;
539 return val
== (int16_t)val
;
541 } else if (ct
& TCG_CT_CONST_ORI
) {
542 return tcg_match_ori(ct
, val
);
543 } else if (ct
& TCG_CT_CONST_XORI
) {
544 return tcg_match_xori(ct
, val
);
545 } else if (ct
& TCG_CT_CONST_CMPI
) {
546 return tcg_match_cmpi(ct
, val
);
552 /* Emit instructions according to the given instruction format. */
554 static void tcg_out_insn_RR(TCGContext
*s
, S390Opcode op
, TCGReg r1
, TCGReg r2
)
556 tcg_out16(s
, (op
<< 8) | (r1
<< 4) | r2
);
559 static void tcg_out_insn_RRE(TCGContext
*s
, S390Opcode op
,
560 TCGReg r1
, TCGReg r2
)
562 tcg_out32(s
, (op
<< 16) | (r1
<< 4) | r2
);
565 static void tcg_out_insn_RRF(TCGContext
*s
, S390Opcode op
,
566 TCGReg r1
, TCGReg r2
, int m3
)
568 tcg_out32(s
, (op
<< 16) | (m3
<< 12) | (r1
<< 4) | r2
);
571 static void tcg_out_insn_RI(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
573 tcg_out32(s
, (op
<< 16) | (r1
<< 20) | (i2
& 0xffff));
576 static void tcg_out_insn_RIL(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
578 tcg_out16(s
, op
| (r1
<< 4));
582 static void tcg_out_insn_RS(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
583 TCGReg b2
, TCGReg r3
, int disp
)
585 tcg_out32(s
, (op
<< 24) | (r1
<< 20) | (r3
<< 16) | (b2
<< 12)
589 static void tcg_out_insn_RSY(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
590 TCGReg b2
, TCGReg r3
, int disp
)
592 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | r3
);
593 tcg_out32(s
, (op
& 0xff) | (b2
<< 28)
594 | ((disp
& 0xfff) << 16) | ((disp
& 0xff000) >> 4));
597 #define tcg_out_insn_RX tcg_out_insn_RS
598 #define tcg_out_insn_RXY tcg_out_insn_RSY
600 /* Emit an opcode with "type-checking" of the format. */
601 #define tcg_out_insn(S, FMT, OP, ...) \
602 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
605 /* emit 64-bit shifts */
606 static void tcg_out_sh64(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
607 TCGReg src
, TCGReg sh_reg
, int sh_imm
)
609 tcg_out_insn_RSY(s
, op
, dest
, sh_reg
, src
, sh_imm
);
612 /* emit 32-bit shifts */
613 static void tcg_out_sh32(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
614 TCGReg sh_reg
, int sh_imm
)
616 tcg_out_insn_RS(s
, op
, dest
, sh_reg
, 0, sh_imm
);
619 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg dst
, TCGReg src
)
622 if (type
== TCG_TYPE_I32
) {
623 tcg_out_insn(s
, RR
, LR
, dst
, src
);
625 tcg_out_insn(s
, RRE
, LGR
, dst
, src
);
630 /* load a register with an immediate value */
631 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
632 TCGReg ret
, tcg_target_long sval
)
634 static const S390Opcode lli_insns
[4] = {
635 RI_LLILL
, RI_LLILH
, RI_LLIHL
, RI_LLIHH
638 tcg_target_ulong uval
= sval
;
641 if (type
== TCG_TYPE_I32
) {
642 uval
= (uint32_t)sval
;
643 sval
= (int32_t)sval
;
646 /* Try all 32-bit insns that can load it in one go. */
647 if (sval
>= -0x8000 && sval
< 0x8000) {
648 tcg_out_insn(s
, RI
, LGHI
, ret
, sval
);
652 for (i
= 0; i
< 4; i
++) {
653 tcg_target_long mask
= 0xffffull
<< i
*16;
654 if ((uval
& mask
) == uval
) {
655 tcg_out_insn_RI(s
, lli_insns
[i
], ret
, uval
>> i
*16);
660 /* Try all 48-bit insns that can load it in one go. */
661 if (facilities
& FACILITY_EXT_IMM
) {
662 if (sval
== (int32_t)sval
) {
663 tcg_out_insn(s
, RIL
, LGFI
, ret
, sval
);
666 if (uval
<= 0xffffffff) {
667 tcg_out_insn(s
, RIL
, LLILF
, ret
, uval
);
670 if ((uval
& 0xffffffff) == 0) {
671 tcg_out_insn(s
, RIL
, LLIHF
, ret
, uval
>> 31 >> 1);
676 /* Try for PC-relative address load. */
677 if ((sval
& 1) == 0) {
678 intptr_t off
= (sval
- (intptr_t)s
->code_ptr
) >> 1;
679 if (off
== (int32_t)off
) {
680 tcg_out_insn(s
, RIL
, LARL
, ret
, off
);
685 /* If extended immediates are not present, then we may have to issue
686 several instructions to load the low 32 bits. */
687 if (!(facilities
& FACILITY_EXT_IMM
)) {
688 /* A 32-bit unsigned value can be loaded in 2 insns. And given
689 that the lli_insns loop above did not succeed, we know that
690 both insns are required. */
691 if (uval
<= 0xffffffff) {
692 tcg_out_insn(s
, RI
, LLILL
, ret
, uval
);
693 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
697 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
698 We first want to make sure that all the high bits get set. With
699 luck the low 16-bits can be considered negative to perform that for
700 free, otherwise we load an explicit -1. */
701 if (sval
>> 31 >> 1 == -1) {
703 tcg_out_insn(s
, RI
, LGHI
, ret
, uval
);
705 tcg_out_insn(s
, RI
, LGHI
, ret
, -1);
706 tcg_out_insn(s
, RI
, IILL
, ret
, uval
);
708 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
713 /* If we get here, both the high and low parts have non-zero bits. */
715 /* Recurse to load the lower 32-bits. */
716 tcg_out_movi(s
, TCG_TYPE_I64
, ret
, uval
& 0xffffffff);
718 /* Insert data into the high 32-bits. */
719 uval
= uval
>> 31 >> 1;
720 if (facilities
& FACILITY_EXT_IMM
) {
721 if (uval
< 0x10000) {
722 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
723 } else if ((uval
& 0xffff) == 0) {
724 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
726 tcg_out_insn(s
, RIL
, IIHF
, ret
, uval
);
730 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
732 if (uval
& 0xffff0000) {
733 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
739 /* Emit a load/store type instruction. Inputs are:
740 DATA: The register to be loaded or stored.
741 BASE+OFS: The effective address.
742 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
743 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
745 static void tcg_out_mem(TCGContext
*s
, S390Opcode opc_rx
, S390Opcode opc_rxy
,
746 TCGReg data
, TCGReg base
, TCGReg index
,
749 if (ofs
< -0x80000 || ofs
>= 0x80000) {
750 /* Combine the low 20 bits of the offset with the actual load insn;
751 the high 44 bits must come from an immediate load. */
752 tcg_target_long low
= ((ofs
& 0xfffff) ^ 0x80000) - 0x80000;
753 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, ofs
- low
);
756 /* If we were already given an index register, add it in. */
757 if (index
!= TCG_REG_NONE
) {
758 tcg_out_insn(s
, RRE
, AGR
, TCG_TMP0
, index
);
763 if (opc_rx
&& ofs
>= 0 && ofs
< 0x1000) {
764 tcg_out_insn_RX(s
, opc_rx
, data
, base
, index
, ofs
);
766 tcg_out_insn_RXY(s
, opc_rxy
, data
, base
, index
, ofs
);
771 /* load data without address translation or endianness conversion */
772 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg data
,
773 TCGReg base
, intptr_t ofs
)
775 if (type
== TCG_TYPE_I32
) {
776 tcg_out_mem(s
, RX_L
, RXY_LY
, data
, base
, TCG_REG_NONE
, ofs
);
778 tcg_out_mem(s
, 0, RXY_LG
, data
, base
, TCG_REG_NONE
, ofs
);
782 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg data
,
783 TCGReg base
, intptr_t ofs
)
785 if (type
== TCG_TYPE_I32
) {
786 tcg_out_mem(s
, RX_ST
, RXY_STY
, data
, base
, TCG_REG_NONE
, ofs
);
788 tcg_out_mem(s
, 0, RXY_STG
, data
, base
, TCG_REG_NONE
, ofs
);
792 /* load data from an absolute host address */
793 static void tcg_out_ld_abs(TCGContext
*s
, TCGType type
, TCGReg dest
, void *abs
)
795 tcg_target_long addr
= (tcg_target_long
)abs
;
797 if (facilities
& FACILITY_GEN_INST_EXT
) {
798 tcg_target_long disp
= (addr
- (tcg_target_long
)s
->code_ptr
) >> 1;
799 if (disp
== (int32_t)disp
) {
800 if (type
== TCG_TYPE_I32
) {
801 tcg_out_insn(s
, RIL
, LRL
, dest
, disp
);
803 tcg_out_insn(s
, RIL
, LGRL
, dest
, disp
);
809 tcg_out_movi(s
, TCG_TYPE_PTR
, dest
, addr
& ~0xffff);
810 tcg_out_ld(s
, type
, dest
, dest
, addr
& 0xffff);
813 static inline void tcg_out_risbg(TCGContext
*s
, TCGReg dest
, TCGReg src
,
814 int msb
, int lsb
, int ofs
, int z
)
817 tcg_out16(s
, (RIE_RISBG
& 0xff00) | (dest
<< 4) | src
);
818 tcg_out16(s
, (msb
<< 8) | (z
<< 7) | lsb
);
819 tcg_out16(s
, (ofs
<< 8) | (RIE_RISBG
& 0xff));
822 static void tgen_ext8s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
824 if (facilities
& FACILITY_EXT_IMM
) {
825 tcg_out_insn(s
, RRE
, LGBR
, dest
, src
);
829 if (type
== TCG_TYPE_I32
) {
831 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 24);
833 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 24);
835 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 24);
837 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 56);
838 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 56);
842 static void tgen_ext8u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
844 if (facilities
& FACILITY_EXT_IMM
) {
845 tcg_out_insn(s
, RRE
, LLGCR
, dest
, src
);
850 tcg_out_movi(s
, type
, TCG_TMP0
, 0xff);
853 tcg_out_movi(s
, type
, dest
, 0xff);
855 if (type
== TCG_TYPE_I32
) {
856 tcg_out_insn(s
, RR
, NR
, dest
, src
);
858 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
862 static void tgen_ext16s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
864 if (facilities
& FACILITY_EXT_IMM
) {
865 tcg_out_insn(s
, RRE
, LGHR
, dest
, src
);
869 if (type
== TCG_TYPE_I32
) {
871 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 16);
873 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 16);
875 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 16);
877 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 48);
878 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 48);
882 static void tgen_ext16u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
884 if (facilities
& FACILITY_EXT_IMM
) {
885 tcg_out_insn(s
, RRE
, LLGHR
, dest
, src
);
890 tcg_out_movi(s
, type
, TCG_TMP0
, 0xffff);
893 tcg_out_movi(s
, type
, dest
, 0xffff);
895 if (type
== TCG_TYPE_I32
) {
896 tcg_out_insn(s
, RR
, NR
, dest
, src
);
898 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
902 static inline void tgen_ext32s(TCGContext
*s
, TCGReg dest
, TCGReg src
)
904 tcg_out_insn(s
, RRE
, LGFR
, dest
, src
);
907 static inline void tgen_ext32u(TCGContext
*s
, TCGReg dest
, TCGReg src
)
909 tcg_out_insn(s
, RRE
, LLGFR
, dest
, src
);
912 /* Accept bit patterns like these:
917 Copied from gcc sources. */
918 static inline bool risbg_mask(uint64_t c
)
921 /* We don't change the number of transitions by inverting,
922 so make sure we start with the LSB zero. */
926 /* Reject all zeros or all ones. */
930 /* Find the first transition. */
932 /* Invert to look for a second transition. */
934 /* Erase the first transition. */
936 /* Find the second transition, if any. */
938 /* Match if all the bits are 1's, or if c is zero. */
942 static void tgen_andi(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
944 static const S390Opcode ni_insns
[4] = {
945 RI_NILL
, RI_NILH
, RI_NIHL
, RI_NIHH
947 static const S390Opcode nif_insns
[2] = {
950 uint64_t valid
= (type
== TCG_TYPE_I32
? 0xffffffffull
: -1ull);
953 /* Look for the zero-extensions. */
954 if ((val
& valid
) == 0xffffffff) {
955 tgen_ext32u(s
, dest
, dest
);
958 if (facilities
& FACILITY_EXT_IMM
) {
959 if ((val
& valid
) == 0xff) {
960 tgen_ext8u(s
, TCG_TYPE_I64
, dest
, dest
);
963 if ((val
& valid
) == 0xffff) {
964 tgen_ext16u(s
, TCG_TYPE_I64
, dest
, dest
);
969 /* Try all 32-bit insns that can perform it in one go. */
970 for (i
= 0; i
< 4; i
++) {
971 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
972 if (((val
| ~valid
) & mask
) == mask
) {
973 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
978 /* Try all 48-bit insns that can perform it in one go. */
979 if (facilities
& FACILITY_EXT_IMM
) {
980 for (i
= 0; i
< 2; i
++) {
981 tcg_target_ulong mask
= ~(0xffffffffull
<< i
*32);
982 if (((val
| ~valid
) & mask
) == mask
) {
983 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
988 if ((facilities
& FACILITY_GEN_INST_EXT
) && risbg_mask(val
)) {
990 if ((val
& 0x8000000000000001ull
) == 0x8000000000000001ull
) {
991 /* Achieve wraparound by swapping msb and lsb. */
992 msb
= 63 - ctz64(~val
);
993 lsb
= clz64(~val
) + 1;
996 lsb
= 63 - ctz64(val
);
998 tcg_out_risbg(s
, dest
, dest
, msb
, lsb
, 0, 1);
1002 /* Fall back to loading the constant. */
1003 tcg_out_movi(s
, type
, TCG_TMP0
, val
);
1004 if (type
== TCG_TYPE_I32
) {
1005 tcg_out_insn(s
, RR
, NR
, dest
, TCG_TMP0
);
1007 tcg_out_insn(s
, RRE
, NGR
, dest
, TCG_TMP0
);
1011 static void tgen64_ori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1013 static const S390Opcode oi_insns
[4] = {
1014 RI_OILL
, RI_OILH
, RI_OIHL
, RI_OIHH
1016 static const S390Opcode nif_insns
[2] = {
1022 /* Look for no-op. */
1027 if (facilities
& FACILITY_EXT_IMM
) {
1028 /* Try all 32-bit insns that can perform it in one go. */
1029 for (i
= 0; i
< 4; i
++) {
1030 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1031 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1032 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1037 /* Try all 48-bit insns that can perform it in one go. */
1038 for (i
= 0; i
< 2; i
++) {
1039 tcg_target_ulong mask
= (0xffffffffull
<< i
*32);
1040 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1041 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
1046 /* Perform the OR via sequential modifications to the high and
1047 low parts. Do this via recursion to handle 16-bit vs 32-bit
1048 masks in each half. */
1049 tgen64_ori(s
, dest
, val
& 0x00000000ffffffffull
);
1050 tgen64_ori(s
, dest
, val
& 0xffffffff00000000ull
);
1052 /* With no extended-immediate facility, we don't need to be so
1053 clever. Just iterate over the insns and mask in the constant. */
1054 for (i
= 0; i
< 4; i
++) {
1055 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1056 if ((val
& mask
) != 0) {
1057 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1063 static void tgen64_xori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1065 /* Perform the xor by parts. */
1066 if (val
& 0xffffffff) {
1067 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1069 if (val
> 0xffffffff) {
1070 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 31 >> 1);
1074 static int tgen_cmp(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg r1
,
1075 TCGArg c2
, int c2const
)
1077 bool is_unsigned
= is_unsigned_cond(c
);
1080 if (type
== TCG_TYPE_I32
) {
1081 tcg_out_insn(s
, RR
, LTR
, r1
, r1
);
1083 tcg_out_insn(s
, RRE
, LTGR
, r1
, r1
);
1085 return tcg_cond_to_ltr_cond
[c
];
1088 if (type
== TCG_TYPE_I32
) {
1089 tcg_out_insn(s
, RIL
, CLFI
, r1
, c2
);
1091 tcg_out_insn(s
, RIL
, CLGFI
, r1
, c2
);
1094 if (type
== TCG_TYPE_I32
) {
1095 tcg_out_insn(s
, RIL
, CFI
, r1
, c2
);
1097 tcg_out_insn(s
, RIL
, CGFI
, r1
, c2
);
1103 if (type
== TCG_TYPE_I32
) {
1104 tcg_out_insn(s
, RR
, CLR
, r1
, c2
);
1106 tcg_out_insn(s
, RRE
, CLGR
, r1
, c2
);
1109 if (type
== TCG_TYPE_I32
) {
1110 tcg_out_insn(s
, RR
, CR
, r1
, c2
);
1112 tcg_out_insn(s
, RRE
, CGR
, r1
, c2
);
1116 return tcg_cond_to_s390_cond
[c
];
1119 static void tgen_setcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1120 TCGReg dest
, TCGReg c1
, TCGArg c2
, int c2const
)
1122 int cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1124 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1125 tcg_out_movi(s
, type
, dest
, 1);
1126 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1127 tcg_out_movi(s
, type
, dest
, 0);
1130 static void tgen_movcond(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg dest
,
1131 TCGReg c1
, TCGArg c2
, int c2const
, TCGReg r3
)
1134 if (facilities
& FACILITY_LOAD_ON_COND
) {
1135 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1136 tcg_out_insn(s
, RRF
, LOCGR
, dest
, r3
, cc
);
1138 c
= tcg_invert_cond(c
);
1139 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
);
1141 /* Emit: if (cc) goto over; dest = r3; over: */
1142 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1143 tcg_out_insn(s
, RRE
, LGR
, dest
, r3
);
1147 bool tcg_target_deposit_valid(int ofs
, int len
)
1149 return (facilities
& FACILITY_GEN_INST_EXT
) != 0;
1152 static void tgen_deposit(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1155 int lsb
= (63 - ofs
);
1156 int msb
= lsb
- (len
- 1);
1157 tcg_out_risbg(s
, dest
, src
, msb
, lsb
, ofs
, 0);
1160 static void tgen_gotoi(TCGContext
*s
, int cc
, tcg_target_long dest
)
1162 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1163 if (off
> -0x8000 && off
< 0x7fff) {
1164 tcg_out_insn(s
, RI
, BRC
, cc
, off
);
1165 } else if (off
== (int32_t)off
) {
1166 tcg_out_insn(s
, RIL
, BRCL
, cc
, off
);
1168 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1169 tcg_out_insn(s
, RR
, BCR
, cc
, TCG_TMP0
);
1173 static void tgen_branch(TCGContext
*s
, int cc
, int labelno
)
1175 TCGLabel
* l
= &s
->labels
[labelno
];
1177 tgen_gotoi(s
, cc
, l
->u
.value
);
1178 } else if (USE_LONG_BRANCHES
) {
1179 tcg_out16(s
, RIL_BRCL
| (cc
<< 4));
1180 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC32DBL
, labelno
, -2);
1183 tcg_out16(s
, RI_BRC
| (cc
<< 4));
1184 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC16DBL
, labelno
, -2);
1189 static void tgen_compare_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1190 TCGReg r1
, TCGReg r2
, int labelno
)
1192 TCGLabel
* l
= &s
->labels
[labelno
];
1193 tcg_target_long off
;
1196 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1198 /* We need to keep the offset unchanged for retranslation. */
1199 off
= ((int16_t *)s
->code_ptr
)[1];
1200 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1203 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | r2
);
1205 tcg_out16(s
, cc
<< 12 | (opc
& 0xff));
1208 static void tgen_compare_imm_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1209 TCGReg r1
, int i2
, int labelno
)
1211 TCGLabel
* l
= &s
->labels
[labelno
];
1212 tcg_target_long off
;
1215 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1217 /* We need to keep the offset unchanged for retranslation. */
1218 off
= ((int16_t *)s
->code_ptr
)[1];
1219 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1222 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | cc
);
1224 tcg_out16(s
, (i2
<< 8) | (opc
& 0xff));
1227 static void tgen_brcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1228 TCGReg r1
, TCGArg c2
, int c2const
, int labelno
)
1232 if (facilities
& FACILITY_GEN_INST_EXT
) {
1233 bool is_unsigned
= is_unsigned_cond(c
);
1237 cc
= tcg_cond_to_s390_cond
[c
];
1240 opc
= (type
== TCG_TYPE_I32
1241 ? (is_unsigned
? RIE_CLRJ
: RIE_CRJ
)
1242 : (is_unsigned
? RIE_CLGRJ
: RIE_CGRJ
));
1243 tgen_compare_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1247 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1248 If the immediate we've been given does not fit that range, we'll
1249 fall back to separate compare and branch instructions using the
1250 larger comparison range afforded by COMPARE IMMEDIATE. */
1251 if (type
== TCG_TYPE_I32
) {
1254 in_range
= (uint32_t)c2
== (uint8_t)c2
;
1257 in_range
= (int32_t)c2
== (int8_t)c2
;
1262 in_range
= (uint64_t)c2
== (uint8_t)c2
;
1265 in_range
= (int64_t)c2
== (int8_t)c2
;
1269 tgen_compare_imm_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1274 cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
);
1275 tgen_branch(s
, cc
, labelno
);
1278 static void tgen_calli(TCGContext
*s
, tcg_target_long dest
)
1280 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1281 if (off
== (int32_t)off
) {
1282 tcg_out_insn(s
, RIL
, BRASL
, TCG_REG_R14
, off
);
1284 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1285 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, TCG_TMP0
);
1289 static void tcg_out_qemu_ld_direct(TCGContext
*s
, int opc
, TCGReg data
,
1290 TCGReg base
, TCGReg index
, int disp
)
1292 #ifdef TARGET_WORDS_BIGENDIAN
1293 const int bswap
= 0;
1295 const int bswap
= 1;
1299 tcg_out_insn(s
, RXY
, LLGC
, data
, base
, index
, disp
);
1302 tcg_out_insn(s
, RXY
, LGB
, data
, base
, index
, disp
);
1306 /* swapped unsigned halfword load with upper bits zeroed */
1307 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1308 tgen_ext16u(s
, TCG_TYPE_I64
, data
, data
);
1310 tcg_out_insn(s
, RXY
, LLGH
, data
, base
, index
, disp
);
1315 /* swapped sign-extended halfword load */
1316 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1317 tgen_ext16s(s
, TCG_TYPE_I64
, data
, data
);
1319 tcg_out_insn(s
, RXY
, LGH
, data
, base
, index
, disp
);
1324 /* swapped unsigned int load with upper bits zeroed */
1325 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1326 tgen_ext32u(s
, data
, data
);
1328 tcg_out_insn(s
, RXY
, LLGF
, data
, base
, index
, disp
);
1333 /* swapped sign-extended int load */
1334 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1335 tgen_ext32s(s
, data
, data
);
1337 tcg_out_insn(s
, RXY
, LGF
, data
, base
, index
, disp
);
1342 tcg_out_insn(s
, RXY
, LRVG
, data
, base
, index
, disp
);
1344 tcg_out_insn(s
, RXY
, LG
, data
, base
, index
, disp
);
1352 static void tcg_out_qemu_st_direct(TCGContext
*s
, int opc
, TCGReg data
,
1353 TCGReg base
, TCGReg index
, int disp
)
1355 #ifdef TARGET_WORDS_BIGENDIAN
1356 const int bswap
= 0;
1358 const int bswap
= 1;
1362 if (disp
>= 0 && disp
< 0x1000) {
1363 tcg_out_insn(s
, RX
, STC
, data
, base
, index
, disp
);
1365 tcg_out_insn(s
, RXY
, STCY
, data
, base
, index
, disp
);
1370 tcg_out_insn(s
, RXY
, STRVH
, data
, base
, index
, disp
);
1371 } else if (disp
>= 0 && disp
< 0x1000) {
1372 tcg_out_insn(s
, RX
, STH
, data
, base
, index
, disp
);
1374 tcg_out_insn(s
, RXY
, STHY
, data
, base
, index
, disp
);
1379 tcg_out_insn(s
, RXY
, STRV
, data
, base
, index
, disp
);
1380 } else if (disp
>= 0 && disp
< 0x1000) {
1381 tcg_out_insn(s
, RX
, ST
, data
, base
, index
, disp
);
1383 tcg_out_insn(s
, RXY
, STY
, data
, base
, index
, disp
);
1388 tcg_out_insn(s
, RXY
, STRVG
, data
, base
, index
, disp
);
1390 tcg_out_insn(s
, RXY
, STG
, data
, base
, index
, disp
);
1398 #if defined(CONFIG_SOFTMMU)
1399 static TCGReg
tcg_prepare_qemu_ldst(TCGContext
* s
, TCGReg data_reg
,
1400 TCGReg addr_reg
, int mem_index
, int opc
,
1401 uint16_t **label2_ptr_p
, int is_store
)
1403 const TCGReg arg0
= tcg_target_call_iarg_regs
[0];
1404 const TCGReg arg1
= tcg_target_call_iarg_regs
[1];
1405 const TCGReg arg2
= tcg_target_call_iarg_regs
[2];
1406 const TCGReg arg3
= tcg_target_call_iarg_regs
[3];
1407 int s_bits
= opc
& 3;
1408 uint16_t *label1_ptr
;
1409 tcg_target_long ofs
;
1411 if (TARGET_LONG_BITS
== 32) {
1412 tgen_ext32u(s
, arg1
, addr_reg
);
1414 tcg_out_mov(s
, TCG_TYPE_I64
, arg1
, addr_reg
);
1417 tcg_out_sh64(s
, RSY_SRLG
, arg2
, addr_reg
, TCG_REG_NONE
,
1418 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1420 tgen_andi(s
, TCG_TYPE_I64
, arg1
, TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
1421 tgen_andi(s
, TCG_TYPE_I64
, arg2
, (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
1424 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
);
1426 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
);
1428 assert(ofs
< 0x80000);
1430 if (TARGET_LONG_BITS
== 32) {
1431 tcg_out_mem(s
, RX_C
, RXY_CY
, arg1
, arg2
, TCG_AREG0
, ofs
);
1433 tcg_out_mem(s
, 0, RXY_CG
, arg1
, arg2
, TCG_AREG0
, ofs
);
1436 if (TARGET_LONG_BITS
== 32) {
1437 tgen_ext32u(s
, arg1
, addr_reg
);
1439 tcg_out_mov(s
, TCG_TYPE_I64
, arg1
, addr_reg
);
1442 label1_ptr
= (uint16_t*)s
->code_ptr
;
1444 /* je label1 (offset will be patched in later) */
1445 tcg_out_insn(s
, RI
, BRC
, S390_CC_EQ
, 0);
1447 /* call load/store helper */
1449 /* Make sure to zero-extend the value to the full register
1450 for the calling convention. */
1453 tgen_ext8u(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1456 tgen_ext16u(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1459 tgen_ext32u(s
, arg2
, data_reg
);
1462 tcg_out_mov(s
, TCG_TYPE_I64
, arg2
, data_reg
);
1467 tcg_out_movi(s
, TCG_TYPE_I32
, arg3
, mem_index
);
1468 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, TCG_AREG0
);
1469 tgen_calli(s
, (tcg_target_ulong
)qemu_st_helpers
[s_bits
]);
1471 tcg_out_movi(s
, TCG_TYPE_I32
, arg2
, mem_index
);
1472 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, TCG_AREG0
);
1473 tgen_calli(s
, (tcg_target_ulong
)qemu_ld_helpers
[s_bits
]);
1475 /* sign extension */
1478 tgen_ext8s(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1481 tgen_ext16s(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1484 tgen_ext32s(s
, data_reg
, TCG_REG_R2
);
1487 /* unsigned -> just copy */
1488 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1493 /* jump to label2 (end) */
1494 *label2_ptr_p
= (uint16_t*)s
->code_ptr
;
1496 tcg_out_insn(s
, RI
, BRC
, S390_CC_ALWAYS
, 0);
1498 /* this is label1, patch branch */
1499 *(label1_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1500 (unsigned long)label1_ptr
) >> 1;
1502 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1503 assert(ofs
< 0x80000);
1505 tcg_out_mem(s
, 0, RXY_AG
, arg1
, arg2
, TCG_AREG0
, ofs
);
1510 static void tcg_finish_qemu_ldst(TCGContext
* s
, uint16_t *label2_ptr
)
1513 *(label2_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1514 (unsigned long)label2_ptr
) >> 1;
1517 static void tcg_prepare_user_ldst(TCGContext
*s
, TCGReg
*addr_reg
,
1518 TCGReg
*index_reg
, tcg_target_long
*disp
)
1520 if (TARGET_LONG_BITS
== 32) {
1521 tgen_ext32u(s
, TCG_TMP0
, *addr_reg
);
1522 *addr_reg
= TCG_TMP0
;
1524 if (GUEST_BASE
< 0x80000) {
1525 *index_reg
= TCG_REG_NONE
;
1528 *index_reg
= TCG_GUEST_BASE_REG
;
1532 #endif /* CONFIG_SOFTMMU */
1534 /* load data with address translation (if applicable)
1535 and endianness conversion */
1536 static void tcg_out_qemu_ld(TCGContext
* s
, const TCGArg
* args
, int opc
)
1538 TCGReg addr_reg
, data_reg
;
1539 #if defined(CONFIG_SOFTMMU)
1541 uint16_t *label2_ptr
;
1544 tcg_target_long disp
;
1550 #if defined(CONFIG_SOFTMMU)
1553 addr_reg
= tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1554 opc
, &label2_ptr
, 0);
1556 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, TCG_REG_NONE
, 0);
1558 tcg_finish_qemu_ldst(s
, label2_ptr
);
1560 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1561 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1565 static void tcg_out_qemu_st(TCGContext
* s
, const TCGArg
* args
, int opc
)
1567 TCGReg addr_reg
, data_reg
;
1568 #if defined(CONFIG_SOFTMMU)
1570 uint16_t *label2_ptr
;
1573 tcg_target_long disp
;
1579 #if defined(CONFIG_SOFTMMU)
1582 addr_reg
= tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1583 opc
, &label2_ptr
, 1);
1585 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, TCG_REG_NONE
, 0);
1587 tcg_finish_qemu_ldst(s
, label2_ptr
);
1589 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1590 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1594 # define OP_32_64(x) \
1595 case glue(glue(INDEX_op_,x),_i32): \
1596 case glue(glue(INDEX_op_,x),_i64)
1598 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1599 const TCGArg
*args
, const int *const_args
)
1605 case INDEX_op_exit_tb
:
1607 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, args
[0]);
1608 tgen_gotoi(s
, S390_CC_ALWAYS
, (unsigned long)tb_ret_addr
);
1611 case INDEX_op_goto_tb
:
1612 if (s
->tb_jmp_offset
) {
1615 /* load address stored at s->tb_next + args[0] */
1616 tcg_out_ld_abs(s
, TCG_TYPE_PTR
, TCG_TMP0
, s
->tb_next
+ args
[0]);
1618 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_TMP0
);
1620 s
->tb_next_offset
[args
[0]] = s
->code_ptr
- s
->code_buf
;
1624 if (const_args
[0]) {
1625 tgen_calli(s
, args
[0]);
1627 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, args
[0]);
1631 case INDEX_op_mov_i32
:
1632 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1634 case INDEX_op_movi_i32
:
1635 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1639 /* ??? LLC (RXY format) is only present with the extended-immediate
1640 facility, whereas LLGC is always present. */
1641 tcg_out_mem(s
, 0, RXY_LLGC
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1645 /* ??? LB is no smaller than LGB, so no point to using it. */
1646 tcg_out_mem(s
, 0, RXY_LGB
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1650 /* ??? LLH (RXY format) is only present with the extended-immediate
1651 facility, whereas LLGH is always present. */
1652 tcg_out_mem(s
, 0, RXY_LLGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1655 case INDEX_op_ld16s_i32
:
1656 tcg_out_mem(s
, RX_LH
, RXY_LHY
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1659 case INDEX_op_ld_i32
:
1660 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1664 tcg_out_mem(s
, RX_STC
, RXY_STCY
, args
[0], args
[1],
1665 TCG_REG_NONE
, args
[2]);
1669 tcg_out_mem(s
, RX_STH
, RXY_STHY
, args
[0], args
[1],
1670 TCG_REG_NONE
, args
[2]);
1673 case INDEX_op_st_i32
:
1674 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1677 case INDEX_op_add_i32
:
1678 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1679 if (const_args
[2]) {
1682 if (a2
== (int16_t)a2
) {
1683 tcg_out_insn(s
, RI
, AHI
, a0
, a2
);
1686 if (facilities
& FACILITY_EXT_IMM
) {
1687 tcg_out_insn(s
, RIL
, AFI
, a0
, a2
);
1691 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1692 } else if (a0
== a1
) {
1693 tcg_out_insn(s
, RR
, AR
, a0
, a2
);
1695 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1698 case INDEX_op_sub_i32
:
1699 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1700 if (const_args
[2]) {
1704 tcg_out_insn(s
, RR
, SR
, args
[0], args
[2]);
1707 case INDEX_op_and_i32
:
1708 if (const_args
[2]) {
1709 tgen_andi(s
, TCG_TYPE_I32
, args
[0], args
[2]);
1711 tcg_out_insn(s
, RR
, NR
, args
[0], args
[2]);
1714 case INDEX_op_or_i32
:
1715 if (const_args
[2]) {
1716 tgen64_ori(s
, args
[0], args
[2] & 0xffffffff);
1718 tcg_out_insn(s
, RR
, OR
, args
[0], args
[2]);
1721 case INDEX_op_xor_i32
:
1722 if (const_args
[2]) {
1723 tgen64_xori(s
, args
[0], args
[2] & 0xffffffff);
1725 tcg_out_insn(s
, RR
, XR
, args
[0], args
[2]);
1729 case INDEX_op_neg_i32
:
1730 tcg_out_insn(s
, RR
, LCR
, args
[0], args
[1]);
1733 case INDEX_op_mul_i32
:
1734 if (const_args
[2]) {
1735 if ((int32_t)args
[2] == (int16_t)args
[2]) {
1736 tcg_out_insn(s
, RI
, MHI
, args
[0], args
[2]);
1738 tcg_out_insn(s
, RIL
, MSFI
, args
[0], args
[2]);
1741 tcg_out_insn(s
, RRE
, MSR
, args
[0], args
[2]);
1745 case INDEX_op_div2_i32
:
1746 tcg_out_insn(s
, RR
, DR
, TCG_REG_R2
, args
[4]);
1748 case INDEX_op_divu2_i32
:
1749 tcg_out_insn(s
, RRE
, DLR
, TCG_REG_R2
, args
[4]);
1752 case INDEX_op_shl_i32
:
1755 if (const_args
[2]) {
1756 tcg_out_sh32(s
, op
, args
[0], TCG_REG_NONE
, args
[2]);
1758 tcg_out_sh32(s
, op
, args
[0], args
[2], 0);
1761 case INDEX_op_shr_i32
:
1764 case INDEX_op_sar_i32
:
1768 case INDEX_op_rotl_i32
:
1769 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1770 if (const_args
[2]) {
1771 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1773 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], args
[2], 0);
1776 case INDEX_op_rotr_i32
:
1777 if (const_args
[2]) {
1778 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1],
1779 TCG_REG_NONE
, (32 - args
[2]) & 31);
1781 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
1782 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_TMP0
, 0);
1786 case INDEX_op_ext8s_i32
:
1787 tgen_ext8s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1789 case INDEX_op_ext16s_i32
:
1790 tgen_ext16s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1792 case INDEX_op_ext8u_i32
:
1793 tgen_ext8u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1795 case INDEX_op_ext16u_i32
:
1796 tgen_ext16u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1800 /* The TCG bswap definition requires bits 0-47 already be zero.
1801 Thus we don't need the G-type insns to implement bswap16_i64. */
1802 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1803 tcg_out_sh32(s
, RS_SRL
, args
[0], TCG_REG_NONE
, 16);
1806 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1809 case INDEX_op_add2_i32
:
1810 /* ??? Make use of ALFI. */
1811 tcg_out_insn(s
, RR
, ALR
, args
[0], args
[4]);
1812 tcg_out_insn(s
, RRE
, ALCR
, args
[1], args
[5]);
1814 case INDEX_op_sub2_i32
:
1815 /* ??? Make use of SLFI. */
1816 tcg_out_insn(s
, RR
, SLR
, args
[0], args
[4]);
1817 tcg_out_insn(s
, RRE
, SLBR
, args
[1], args
[5]);
1821 tgen_branch(s
, S390_CC_ALWAYS
, args
[0]);
1824 case INDEX_op_brcond_i32
:
1825 tgen_brcond(s
, TCG_TYPE_I32
, args
[2], args
[0],
1826 args
[1], const_args
[1], args
[3]);
1828 case INDEX_op_setcond_i32
:
1829 tgen_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1],
1830 args
[2], const_args
[2]);
1832 case INDEX_op_movcond_i32
:
1833 tgen_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1],
1834 args
[2], const_args
[2], args
[3]);
1837 case INDEX_op_qemu_ld8u
:
1838 tcg_out_qemu_ld(s
, args
, LD_UINT8
);
1840 case INDEX_op_qemu_ld8s
:
1841 tcg_out_qemu_ld(s
, args
, LD_INT8
);
1843 case INDEX_op_qemu_ld16u
:
1844 tcg_out_qemu_ld(s
, args
, LD_UINT16
);
1846 case INDEX_op_qemu_ld16s
:
1847 tcg_out_qemu_ld(s
, args
, LD_INT16
);
1849 case INDEX_op_qemu_ld32
:
1850 /* ??? Technically we can use a non-extending instruction. */
1851 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
1853 case INDEX_op_qemu_ld64
:
1854 tcg_out_qemu_ld(s
, args
, LD_UINT64
);
1857 case INDEX_op_qemu_st8
:
1858 tcg_out_qemu_st(s
, args
, LD_UINT8
);
1860 case INDEX_op_qemu_st16
:
1861 tcg_out_qemu_st(s
, args
, LD_UINT16
);
1863 case INDEX_op_qemu_st32
:
1864 tcg_out_qemu_st(s
, args
, LD_UINT32
);
1866 case INDEX_op_qemu_st64
:
1867 tcg_out_qemu_st(s
, args
, LD_UINT64
);
1870 case INDEX_op_mov_i64
:
1871 tcg_out_mov(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1873 case INDEX_op_movi_i64
:
1874 tcg_out_movi(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1877 case INDEX_op_ld16s_i64
:
1878 tcg_out_mem(s
, 0, RXY_LGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1880 case INDEX_op_ld32u_i64
:
1881 tcg_out_mem(s
, 0, RXY_LLGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1883 case INDEX_op_ld32s_i64
:
1884 tcg_out_mem(s
, 0, RXY_LGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1886 case INDEX_op_ld_i64
:
1887 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1890 case INDEX_op_st32_i64
:
1891 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1893 case INDEX_op_st_i64
:
1894 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1897 case INDEX_op_add_i64
:
1898 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1899 if (const_args
[2]) {
1902 if (a2
== (int16_t)a2
) {
1903 tcg_out_insn(s
, RI
, AGHI
, a0
, a2
);
1906 if (facilities
& FACILITY_EXT_IMM
) {
1907 if (a2
== (int32_t)a2
) {
1908 tcg_out_insn(s
, RIL
, AGFI
, a0
, a2
);
1910 } else if (a2
== (uint32_t)a2
) {
1911 tcg_out_insn(s
, RIL
, ALGFI
, a0
, a2
);
1913 } else if (-a2
== (uint32_t)-a2
) {
1914 tcg_out_insn(s
, RIL
, SLGFI
, a0
, -a2
);
1919 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1920 } else if (a0
== a1
) {
1921 tcg_out_insn(s
, RRE
, AGR
, a0
, a2
);
1923 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1926 case INDEX_op_sub_i64
:
1927 a0
= args
[0], a1
= args
[1], a2
= args
[2];
1928 if (const_args
[2]) {
1932 tcg_out_insn(s
, RRE
, SGR
, args
[0], args
[2]);
1936 case INDEX_op_and_i64
:
1937 if (const_args
[2]) {
1938 tgen_andi(s
, TCG_TYPE_I64
, args
[0], args
[2]);
1940 tcg_out_insn(s
, RRE
, NGR
, args
[0], args
[2]);
1943 case INDEX_op_or_i64
:
1944 if (const_args
[2]) {
1945 tgen64_ori(s
, args
[0], args
[2]);
1947 tcg_out_insn(s
, RRE
, OGR
, args
[0], args
[2]);
1950 case INDEX_op_xor_i64
:
1951 if (const_args
[2]) {
1952 tgen64_xori(s
, args
[0], args
[2]);
1954 tcg_out_insn(s
, RRE
, XGR
, args
[0], args
[2]);
1958 case INDEX_op_neg_i64
:
1959 tcg_out_insn(s
, RRE
, LCGR
, args
[0], args
[1]);
1961 case INDEX_op_bswap64_i64
:
1962 tcg_out_insn(s
, RRE
, LRVGR
, args
[0], args
[1]);
1965 case INDEX_op_mul_i64
:
1966 if (const_args
[2]) {
1967 if (args
[2] == (int16_t)args
[2]) {
1968 tcg_out_insn(s
, RI
, MGHI
, args
[0], args
[2]);
1970 tcg_out_insn(s
, RIL
, MSGFI
, args
[0], args
[2]);
1973 tcg_out_insn(s
, RRE
, MSGR
, args
[0], args
[2]);
1977 case INDEX_op_div2_i64
:
1978 /* ??? We get an unnecessary sign-extension of the dividend
1979 into R3 with this definition, but as we do in fact always
1980 produce both quotient and remainder using INDEX_op_div_i64
1981 instead requires jumping through even more hoops. */
1982 tcg_out_insn(s
, RRE
, DSGR
, TCG_REG_R2
, args
[4]);
1984 case INDEX_op_divu2_i64
:
1985 tcg_out_insn(s
, RRE
, DLGR
, TCG_REG_R2
, args
[4]);
1987 case INDEX_op_mulu2_i64
:
1988 tcg_out_insn(s
, RRE
, MLGR
, TCG_REG_R2
, args
[3]);
1991 case INDEX_op_shl_i64
:
1994 if (const_args
[2]) {
1995 tcg_out_sh64(s
, op
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1997 tcg_out_sh64(s
, op
, args
[0], args
[1], args
[2], 0);
2000 case INDEX_op_shr_i64
:
2003 case INDEX_op_sar_i64
:
2007 case INDEX_op_rotl_i64
:
2008 if (const_args
[2]) {
2009 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2010 TCG_REG_NONE
, args
[2]);
2012 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], args
[2], 0);
2015 case INDEX_op_rotr_i64
:
2016 if (const_args
[2]) {
2017 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2018 TCG_REG_NONE
, (64 - args
[2]) & 63);
2020 /* We can use the smaller 32-bit negate because only the
2021 low 6 bits are examined for the rotate. */
2022 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
2023 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], TCG_TMP0
, 0);
2027 case INDEX_op_ext8s_i64
:
2028 tgen_ext8s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2030 case INDEX_op_ext16s_i64
:
2031 tgen_ext16s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2033 case INDEX_op_ext32s_i64
:
2034 tgen_ext32s(s
, args
[0], args
[1]);
2036 case INDEX_op_ext8u_i64
:
2037 tgen_ext8u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2039 case INDEX_op_ext16u_i64
:
2040 tgen_ext16u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2042 case INDEX_op_ext32u_i64
:
2043 tgen_ext32u(s
, args
[0], args
[1]);
2046 case INDEX_op_add2_i64
:
2047 /* ??? Make use of ALGFI and SLGFI. */
2048 tcg_out_insn(s
, RRE
, ALGR
, args
[0], args
[4]);
2049 tcg_out_insn(s
, RRE
, ALCGR
, args
[1], args
[5]);
2051 case INDEX_op_sub2_i64
:
2052 /* ??? Make use of ALGFI and SLGFI. */
2053 tcg_out_insn(s
, RRE
, SLGR
, args
[0], args
[4]);
2054 tcg_out_insn(s
, RRE
, SLBGR
, args
[1], args
[5]);
2057 case INDEX_op_brcond_i64
:
2058 tgen_brcond(s
, TCG_TYPE_I64
, args
[2], args
[0],
2059 args
[1], const_args
[1], args
[3]);
2061 case INDEX_op_setcond_i64
:
2062 tgen_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1],
2063 args
[2], const_args
[2]);
2065 case INDEX_op_movcond_i64
:
2066 tgen_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1],
2067 args
[2], const_args
[2], args
[3]);
2070 case INDEX_op_qemu_ld32u
:
2071 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
2073 case INDEX_op_qemu_ld32s
:
2074 tcg_out_qemu_ld(s
, args
, LD_INT32
);
2078 tgen_deposit(s
, args
[0], args
[2], args
[3], args
[4]);
2082 fprintf(stderr
,"unimplemented opc 0x%x\n",opc
);
2087 static const TCGTargetOpDef s390_op_defs
[] = {
2088 { INDEX_op_exit_tb
, { } },
2089 { INDEX_op_goto_tb
, { } },
2090 { INDEX_op_call
, { "ri" } },
2091 { INDEX_op_br
, { } },
2093 { INDEX_op_mov_i32
, { "r", "r" } },
2094 { INDEX_op_movi_i32
, { "r" } },
2096 { INDEX_op_ld8u_i32
, { "r", "r" } },
2097 { INDEX_op_ld8s_i32
, { "r", "r" } },
2098 { INDEX_op_ld16u_i32
, { "r", "r" } },
2099 { INDEX_op_ld16s_i32
, { "r", "r" } },
2100 { INDEX_op_ld_i32
, { "r", "r" } },
2101 { INDEX_op_st8_i32
, { "r", "r" } },
2102 { INDEX_op_st16_i32
, { "r", "r" } },
2103 { INDEX_op_st_i32
, { "r", "r" } },
2105 { INDEX_op_add_i32
, { "r", "r", "ri" } },
2106 { INDEX_op_sub_i32
, { "r", "0", "ri" } },
2107 { INDEX_op_mul_i32
, { "r", "0", "rK" } },
2109 { INDEX_op_div2_i32
, { "b", "a", "0", "1", "r" } },
2110 { INDEX_op_divu2_i32
, { "b", "a", "0", "1", "r" } },
2112 { INDEX_op_and_i32
, { "r", "0", "ri" } },
2113 { INDEX_op_or_i32
, { "r", "0", "rWO" } },
2114 { INDEX_op_xor_i32
, { "r", "0", "rWX" } },
2116 { INDEX_op_neg_i32
, { "r", "r" } },
2118 { INDEX_op_shl_i32
, { "r", "0", "Ri" } },
2119 { INDEX_op_shr_i32
, { "r", "0", "Ri" } },
2120 { INDEX_op_sar_i32
, { "r", "0", "Ri" } },
2122 { INDEX_op_rotl_i32
, { "r", "r", "Ri" } },
2123 { INDEX_op_rotr_i32
, { "r", "r", "Ri" } },
2125 { INDEX_op_ext8s_i32
, { "r", "r" } },
2126 { INDEX_op_ext8u_i32
, { "r", "r" } },
2127 { INDEX_op_ext16s_i32
, { "r", "r" } },
2128 { INDEX_op_ext16u_i32
, { "r", "r" } },
2130 { INDEX_op_bswap16_i32
, { "r", "r" } },
2131 { INDEX_op_bswap32_i32
, { "r", "r" } },
2133 { INDEX_op_add2_i32
, { "r", "r", "0", "1", "r", "r" } },
2134 { INDEX_op_sub2_i32
, { "r", "r", "0", "1", "r", "r" } },
2136 { INDEX_op_brcond_i32
, { "r", "rWC" } },
2137 { INDEX_op_setcond_i32
, { "r", "r", "rWC" } },
2138 { INDEX_op_movcond_i32
, { "r", "r", "rWC", "r", "0" } },
2139 { INDEX_op_deposit_i32
, { "r", "0", "r" } },
2141 { INDEX_op_qemu_ld8u
, { "r", "L" } },
2142 { INDEX_op_qemu_ld8s
, { "r", "L" } },
2143 { INDEX_op_qemu_ld16u
, { "r", "L" } },
2144 { INDEX_op_qemu_ld16s
, { "r", "L" } },
2145 { INDEX_op_qemu_ld32
, { "r", "L" } },
2146 { INDEX_op_qemu_ld64
, { "r", "L" } },
2148 { INDEX_op_qemu_st8
, { "L", "L" } },
2149 { INDEX_op_qemu_st16
, { "L", "L" } },
2150 { INDEX_op_qemu_st32
, { "L", "L" } },
2151 { INDEX_op_qemu_st64
, { "L", "L" } },
2153 { INDEX_op_mov_i64
, { "r", "r" } },
2154 { INDEX_op_movi_i64
, { "r" } },
2156 { INDEX_op_ld8u_i64
, { "r", "r" } },
2157 { INDEX_op_ld8s_i64
, { "r", "r" } },
2158 { INDEX_op_ld16u_i64
, { "r", "r" } },
2159 { INDEX_op_ld16s_i64
, { "r", "r" } },
2160 { INDEX_op_ld32u_i64
, { "r", "r" } },
2161 { INDEX_op_ld32s_i64
, { "r", "r" } },
2162 { INDEX_op_ld_i64
, { "r", "r" } },
2164 { INDEX_op_st8_i64
, { "r", "r" } },
2165 { INDEX_op_st16_i64
, { "r", "r" } },
2166 { INDEX_op_st32_i64
, { "r", "r" } },
2167 { INDEX_op_st_i64
, { "r", "r" } },
2169 { INDEX_op_add_i64
, { "r", "r", "ri" } },
2170 { INDEX_op_sub_i64
, { "r", "0", "ri" } },
2171 { INDEX_op_mul_i64
, { "r", "0", "rK" } },
2173 { INDEX_op_div2_i64
, { "b", "a", "0", "1", "r" } },
2174 { INDEX_op_divu2_i64
, { "b", "a", "0", "1", "r" } },
2175 { INDEX_op_mulu2_i64
, { "b", "a", "0", "r" } },
2177 { INDEX_op_and_i64
, { "r", "0", "ri" } },
2178 { INDEX_op_or_i64
, { "r", "0", "rO" } },
2179 { INDEX_op_xor_i64
, { "r", "0", "rX" } },
2181 { INDEX_op_neg_i64
, { "r", "r" } },
2183 { INDEX_op_shl_i64
, { "r", "r", "Ri" } },
2184 { INDEX_op_shr_i64
, { "r", "r", "Ri" } },
2185 { INDEX_op_sar_i64
, { "r", "r", "Ri" } },
2187 { INDEX_op_rotl_i64
, { "r", "r", "Ri" } },
2188 { INDEX_op_rotr_i64
, { "r", "r", "Ri" } },
2190 { INDEX_op_ext8s_i64
, { "r", "r" } },
2191 { INDEX_op_ext8u_i64
, { "r", "r" } },
2192 { INDEX_op_ext16s_i64
, { "r", "r" } },
2193 { INDEX_op_ext16u_i64
, { "r", "r" } },
2194 { INDEX_op_ext32s_i64
, { "r", "r" } },
2195 { INDEX_op_ext32u_i64
, { "r", "r" } },
2197 { INDEX_op_bswap16_i64
, { "r", "r" } },
2198 { INDEX_op_bswap32_i64
, { "r", "r" } },
2199 { INDEX_op_bswap64_i64
, { "r", "r" } },
2201 { INDEX_op_add2_i64
, { "r", "r", "0", "1", "r", "r" } },
2202 { INDEX_op_sub2_i64
, { "r", "r", "0", "1", "r", "r" } },
2204 { INDEX_op_brcond_i64
, { "r", "rC" } },
2205 { INDEX_op_setcond_i64
, { "r", "r", "rC" } },
2206 { INDEX_op_movcond_i64
, { "r", "r", "rC", "r", "0" } },
2207 { INDEX_op_deposit_i64
, { "r", "0", "r" } },
2209 { INDEX_op_qemu_ld32u
, { "r", "L" } },
2210 { INDEX_op_qemu_ld32s
, { "r", "L" } },
2215 /* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
2216 this information. However, getting at that entry is not easy this far
2217 away from main. Our options are: start searching from environ, but
2218 that fails as soon as someone does a setenv in between. Read the data
2219 from /proc/self/auxv. Or do the probing ourselves. The only thing
2220 extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2221 that the kernel saves all 64-bits of the registers around traps while
2222 in 31-bit mode. But this is true of all "recent" kernels (ought to dig
2223 back and see from when this might not be true). */
/* Set by the SIGILL handler during facility probing when the probed
   instruction is not available on this CPU.  */
static volatile sig_atomic_t got_sigill;

static void sigill_handler(int sig)
{
    got_sigill = 1;
}
2234 static void query_facilities(void)
2236 struct sigaction sa_old
, sa_new
;
2237 register int r0
__asm__("0");
2238 register void *r1
__asm__("1");
2241 memset(&sa_new
, 0, sizeof(sa_new
));
2242 sa_new
.sa_handler
= sigill_handler
;
2243 sigaction(SIGILL
, &sa_new
, &sa_old
);
2245 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2246 we need not do any more probing. Unfortunately, this itself is an
2247 extension and the original STORE FACILITY LIST instruction is
2248 kernel-only, storing its results at absolute address 200. */
2251 asm volatile(".word 0xb2b0,0x1000"
2252 : "=r"(r0
) : "0"(0), "r"(r1
) : "memory", "cc");
2255 /* STORE FACILITY EXTENDED is not available. Probe for one of each
2256 kind of instruction that we're interested in. */
2257 /* ??? Possibly some of these are in practice never present unless
2258 the store-facility-extended facility is also present. But since
2259 that isn't documented it's just better to probe for each. */
2261 /* Test for z/Architecture. Required even in 31-bit mode. */
2264 asm volatile(".word 0xb908,0x0000" : "=r"(r0
) : : "cc");
2266 facilities
|= FACILITY_ZARCH_ACTIVE
;
2269 /* Test for long displacement. */
2273 asm volatile(".word 0xe300,0x1000,0x0058"
2274 : "=r"(r0
) : "r"(r1
) : "cc");
2276 facilities
|= FACILITY_LONG_DISP
;
2279 /* Test for extended immediates. */
2282 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2284 facilities
|= FACILITY_EXT_IMM
;
2287 /* Test for general-instructions-extension. */
2290 asm volatile(".word 0xc201,0x0000,0x0001");
2292 facilities
|= FACILITY_GEN_INST_EXT
;
2296 sigaction(SIGILL
, &sa_old
, NULL
);
2298 /* The translator currently uses these extensions unconditionally.
2299 Pruning this back to the base ESA/390 architecture doesn't seem
2300 worthwhile, since even the KVM target requires z/Arch. */
2302 if ((facilities
& FACILITY_ZARCH_ACTIVE
) == 0) {
2303 fprintf(stderr
, "TCG: z/Arch facility is required.\n");
2304 fprintf(stderr
, "TCG: Boot with a 64-bit enabled kernel.\n");
2307 if ((facilities
& FACILITY_LONG_DISP
) == 0) {
2308 fprintf(stderr
, "TCG: long-displacement facility is required.\n");
2312 /* So far there's just enough support for 31-bit mode to let the
2313 compile succeed. This is good enough to run QEMU with KVM. */
2314 if (sizeof(void *) != 8) {
2315 fprintf(stderr
, "TCG: 31-bit mode is not supported.\n");
2324 static void tcg_target_init(TCGContext
*s
)
2328 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffff);
2329 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, 0xffff);
2331 tcg_regset_clear(tcg_target_call_clobber_regs
);
2332 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2333 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2334 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2335 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2336 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R4
);
2337 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R5
);
2338 /* The return register can be considered call-clobbered. */
2339 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2341 tcg_regset_clear(s
->reserved_regs
);
2342 tcg_regset_set_reg(s
->reserved_regs
, TCG_TMP0
);
2343 /* XXX many insns can't be used with R0, so we better avoid it for now */
2344 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_R0
);
2345 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2347 tcg_add_target_add_op_defs(s390_op_defs
);
2350 static void tcg_target_qemu_prologue(TCGContext
*s
)
2352 tcg_target_long frame_size
;
2354 /* stmg %r6,%r15,48(%r15) (save registers) */
2355 tcg_out_insn(s
, RXY
, STMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 48);
2357 /* aghi %r15,-frame_size */
2358 frame_size
= TCG_TARGET_CALL_STACK_OFFSET
;
2359 frame_size
+= TCG_STATIC_CALL_ARGS_SIZE
;
2360 frame_size
+= CPU_TEMP_BUF_NLONGS
* sizeof(long);
2361 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_R15
, -frame_size
);
2363 tcg_set_frame(s
, TCG_REG_CALL_STACK
,
2364 TCG_STATIC_CALL_ARGS_SIZE
+ TCG_TARGET_CALL_STACK_OFFSET
,
2365 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2367 if (GUEST_BASE
>= 0x80000) {
2368 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, GUEST_BASE
);
2369 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
2372 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2373 /* br %r3 (go to TB) */
2374 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, tcg_target_call_iarg_regs
[1]);
2376 tb_ret_addr
= s
->code_ptr
;
2378 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2379 tcg_out_insn(s
, RXY
, LMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
,
2382 /* br %r14 (return) */
2383 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_R14
);