[qemu/ar7.git] tcg/s390/tcg-target.inc.c
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
27 /* We only support generating code for 64-bit mode. */
28 #if TCG_TARGET_REG_BITS != 64
29 #error "unsupported code generation mode"
30 #endif
32 #include "tcg-pool.inc.c"
33 #include "elf.h"
35 /* ??? The translation blocks produced by TCG are generally small enough to
36 be entirely reachable with a 16-bit displacement. Leaving the option for
37 a 32-bit displacement here Just In Case. */
38 #define USE_LONG_BRANCHES 0
40 #define TCG_CT_CONST_S16 0x100
41 #define TCG_CT_CONST_S32 0x200
42 #define TCG_CT_CONST_S33 0x400
43 #define TCG_CT_CONST_ZERO 0x800
45 /* In several places within the instruction set, 0 means "no register"
46 rather than TCG_REG_R0. */
47 #define TCG_REG_NONE 0
49 /* A scratch register that may be used throughout the backend. */
50 #define TCG_TMP0 TCG_REG_R1
52 /* A scratch register that holds a pointer to the beginning of the TB.
53 We don't need this when we have pc-relative loads with the general
54 instructions extension facility. */
55 #define TCG_REG_TB TCG_REG_R12
56 #define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))
58 #ifndef CONFIG_SOFTMMU
59 #define TCG_GUEST_BASE_REG TCG_REG_R13
60 #endif
62 /* All of the following instructions are prefixed with their instruction
63 format, and are defined as 8- or 16-bit quantities, even when the two
64 halves of the 16-bit quantity may appear 32 bits apart in the insn.
65 This makes it easy to copy the values from the tables in Appendix B. */
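/* For example, RIL_BRASL below is 0xc005: the high byte 0xc0 and the low
   nibble 0x05 are the two opcode fields of the RIL format, and
   tcg_out_insn_RIL() simply ORs the R1 field into the nibble between them,
   so "BRASL %r14,..." begins with the halfword 0xc0e5.  */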
66 typedef enum S390Opcode {
67 RIL_AFI = 0xc209,
68 RIL_AGFI = 0xc208,
69 RIL_ALFI = 0xc20b,
70 RIL_ALGFI = 0xc20a,
71 RIL_BRASL = 0xc005,
72 RIL_BRCL = 0xc004,
73 RIL_CFI = 0xc20d,
74 RIL_CGFI = 0xc20c,
75 RIL_CLFI = 0xc20f,
76 RIL_CLGFI = 0xc20e,
77 RIL_CLRL = 0xc60f,
78 RIL_CLGRL = 0xc60a,
79 RIL_CRL = 0xc60d,
80 RIL_CGRL = 0xc608,
81 RIL_IIHF = 0xc008,
82 RIL_IILF = 0xc009,
83 RIL_LARL = 0xc000,
84 RIL_LGFI = 0xc001,
85 RIL_LGRL = 0xc408,
86 RIL_LLIHF = 0xc00e,
87 RIL_LLILF = 0xc00f,
88 RIL_LRL = 0xc40d,
89 RIL_MSFI = 0xc201,
90 RIL_MSGFI = 0xc200,
91 RIL_NIHF = 0xc00a,
92 RIL_NILF = 0xc00b,
93 RIL_OIHF = 0xc00c,
94 RIL_OILF = 0xc00d,
95 RIL_SLFI = 0xc205,
96 RIL_SLGFI = 0xc204,
97 RIL_XIHF = 0xc006,
98 RIL_XILF = 0xc007,
100 RI_AGHI = 0xa70b,
101 RI_AHI = 0xa70a,
102 RI_BRC = 0xa704,
103 RI_CHI = 0xa70e,
104 RI_CGHI = 0xa70f,
105 RI_IIHH = 0xa500,
106 RI_IIHL = 0xa501,
107 RI_IILH = 0xa502,
108 RI_IILL = 0xa503,
109 RI_LGHI = 0xa709,
110 RI_LLIHH = 0xa50c,
111 RI_LLIHL = 0xa50d,
112 RI_LLILH = 0xa50e,
113 RI_LLILL = 0xa50f,
114 RI_MGHI = 0xa70d,
115 RI_MHI = 0xa70c,
116 RI_NIHH = 0xa504,
117 RI_NIHL = 0xa505,
118 RI_NILH = 0xa506,
119 RI_NILL = 0xa507,
120 RI_OIHH = 0xa508,
121 RI_OIHL = 0xa509,
122 RI_OILH = 0xa50a,
123 RI_OILL = 0xa50b,
125 RIE_CGIJ = 0xec7c,
126 RIE_CGRJ = 0xec64,
127 RIE_CIJ = 0xec7e,
128 RIE_CLGRJ = 0xec65,
129 RIE_CLIJ = 0xec7f,
130 RIE_CLGIJ = 0xec7d,
131 RIE_CLRJ = 0xec77,
132 RIE_CRJ = 0xec76,
133 RIE_LOCGHI = 0xec46,
134 RIE_RISBG = 0xec55,
136 RRE_AGR = 0xb908,
137 RRE_ALGR = 0xb90a,
138 RRE_ALCR = 0xb998,
139 RRE_ALCGR = 0xb988,
140 RRE_CGR = 0xb920,
141 RRE_CLGR = 0xb921,
142 RRE_DLGR = 0xb987,
143 RRE_DLR = 0xb997,
144 RRE_DSGFR = 0xb91d,
145 RRE_DSGR = 0xb90d,
146 RRE_FLOGR = 0xb983,
147 RRE_LGBR = 0xb906,
148 RRE_LCGR = 0xb903,
149 RRE_LGFR = 0xb914,
150 RRE_LGHR = 0xb907,
151 RRE_LGR = 0xb904,
152 RRE_LLGCR = 0xb984,
153 RRE_LLGFR = 0xb916,
154 RRE_LLGHR = 0xb985,
155 RRE_LRVR = 0xb91f,
156 RRE_LRVGR = 0xb90f,
157 RRE_LTGR = 0xb902,
158 RRE_MLGR = 0xb986,
159 RRE_MSGR = 0xb90c,
160 RRE_MSR = 0xb252,
161 RRE_NGR = 0xb980,
162 RRE_OGR = 0xb981,
163 RRE_SGR = 0xb909,
164 RRE_SLGR = 0xb90b,
165 RRE_SLBR = 0xb999,
166 RRE_SLBGR = 0xb989,
167 RRE_XGR = 0xb982,
169 RRF_LOCR = 0xb9f2,
170 RRF_LOCGR = 0xb9e2,
171 RRF_NRK = 0xb9f4,
172 RRF_NGRK = 0xb9e4,
173 RRF_ORK = 0xb9f6,
174 RRF_OGRK = 0xb9e6,
175 RRF_SRK = 0xb9f9,
176 RRF_SGRK = 0xb9e9,
177 RRF_SLRK = 0xb9fb,
178 RRF_SLGRK = 0xb9eb,
179 RRF_XRK = 0xb9f7,
180 RRF_XGRK = 0xb9e7,
182 RR_AR = 0x1a,
183 RR_ALR = 0x1e,
184 RR_BASR = 0x0d,
185 RR_BCR = 0x07,
186 RR_CLR = 0x15,
187 RR_CR = 0x19,
188 RR_DR = 0x1d,
189 RR_LCR = 0x13,
190 RR_LR = 0x18,
191 RR_LTR = 0x12,
192 RR_NR = 0x14,
193 RR_OR = 0x16,
194 RR_SR = 0x1b,
195 RR_SLR = 0x1f,
196 RR_XR = 0x17,
198 RSY_RLL = 0xeb1d,
199 RSY_RLLG = 0xeb1c,
200 RSY_SLLG = 0xeb0d,
201 RSY_SLLK = 0xebdf,
202 RSY_SRAG = 0xeb0a,
203 RSY_SRAK = 0xebdc,
204 RSY_SRLG = 0xeb0c,
205 RSY_SRLK = 0xebde,
207 RS_SLL = 0x89,
208 RS_SRA = 0x8a,
209 RS_SRL = 0x88,
211 RXY_AG = 0xe308,
212 RXY_AY = 0xe35a,
213 RXY_CG = 0xe320,
214 RXY_CLG = 0xe321,
215 RXY_CLY = 0xe355,
216 RXY_CY = 0xe359,
217 RXY_LAY = 0xe371,
218 RXY_LB = 0xe376,
219 RXY_LG = 0xe304,
220 RXY_LGB = 0xe377,
221 RXY_LGF = 0xe314,
222 RXY_LGH = 0xe315,
223 RXY_LHY = 0xe378,
224 RXY_LLGC = 0xe390,
225 RXY_LLGF = 0xe316,
226 RXY_LLGH = 0xe391,
227 RXY_LMG = 0xeb04,
228 RXY_LRV = 0xe31e,
229 RXY_LRVG = 0xe30f,
230 RXY_LRVH = 0xe31f,
231 RXY_LY = 0xe358,
232 RXY_NG = 0xe380,
233 RXY_OG = 0xe381,
234 RXY_STCY = 0xe372,
235 RXY_STG = 0xe324,
236 RXY_STHY = 0xe370,
237 RXY_STMG = 0xeb24,
238 RXY_STRV = 0xe33e,
239 RXY_STRVG = 0xe32f,
240 RXY_STRVH = 0xe33f,
241 RXY_STY = 0xe350,
242 RXY_XG = 0xe382,
244 RX_A = 0x5a,
245 RX_C = 0x59,
246 RX_L = 0x58,
247 RX_LA = 0x41,
248 RX_LH = 0x48,
249 RX_ST = 0x50,
250 RX_STC = 0x42,
251 RX_STH = 0x40,
253 NOP = 0x0707,
254 } S390Opcode;
256 #ifdef CONFIG_DEBUG_TCG
257 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
258 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
259 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
261 #endif
263 /* Since R6 is a potential argument register, choose it last of the
264 call-saved registers. Likewise prefer the call-clobbered registers
265 in reverse order to maximize the chance of avoiding the arguments. */
266 static const int tcg_target_reg_alloc_order[] = {
267 /* Call saved registers. */
268 TCG_REG_R13,
269 TCG_REG_R12,
270 TCG_REG_R11,
271 TCG_REG_R10,
272 TCG_REG_R9,
273 TCG_REG_R8,
274 TCG_REG_R7,
275 TCG_REG_R6,
276 /* Call clobbered registers. */
277 TCG_REG_R14,
278 TCG_REG_R0,
279 TCG_REG_R1,
280 /* Argument registers, in reverse order of allocation. */
281 TCG_REG_R5,
282 TCG_REG_R4,
283 TCG_REG_R3,
284 TCG_REG_R2,
287 static const int tcg_target_call_iarg_regs[] = {
288 TCG_REG_R2,
289 TCG_REG_R3,
290 TCG_REG_R4,
291 TCG_REG_R5,
292 TCG_REG_R6,
295 static const int tcg_target_call_oarg_regs[] = {
296 TCG_REG_R2,
299 #define S390_CC_EQ 8
300 #define S390_CC_LT 4
301 #define S390_CC_GT 2
302 #define S390_CC_OV 1
303 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
304 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
305 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
306 #define S390_CC_NEVER 0
307 #define S390_CC_ALWAYS 15
309 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
310 static const uint8_t tcg_cond_to_s390_cond[] = {
311 [TCG_COND_EQ] = S390_CC_EQ,
312 [TCG_COND_NE] = S390_CC_NE,
313 [TCG_COND_LT] = S390_CC_LT,
314 [TCG_COND_LE] = S390_CC_LE,
315 [TCG_COND_GT] = S390_CC_GT,
316 [TCG_COND_GE] = S390_CC_GE,
317 [TCG_COND_LTU] = S390_CC_LT,
318 [TCG_COND_LEU] = S390_CC_LE,
319 [TCG_COND_GTU] = S390_CC_GT,
320 [TCG_COND_GEU] = S390_CC_GE,
323 /* Condition codes that result from a LOAD AND TEST. Here we have no
324 unsigned instruction variant; however, since the test is against zero we
325 can re-map the outcomes appropriately. */
326 static const uint8_t tcg_cond_to_ltr_cond[] = {
327 [TCG_COND_EQ] = S390_CC_EQ,
328 [TCG_COND_NE] = S390_CC_NE,
329 [TCG_COND_LT] = S390_CC_LT,
330 [TCG_COND_LE] = S390_CC_LE,
331 [TCG_COND_GT] = S390_CC_GT,
332 [TCG_COND_GE] = S390_CC_GE,
333 [TCG_COND_LTU] = S390_CC_NEVER,
334 [TCG_COND_LEU] = S390_CC_EQ,
335 [TCG_COND_GTU] = S390_CC_NE,
336 [TCG_COND_GEU] = S390_CC_ALWAYS,
339 #ifdef CONFIG_SOFTMMU
340 static void * const qemu_ld_helpers[16] = {
341 [MO_UB] = helper_ret_ldub_mmu,
342 [MO_SB] = helper_ret_ldsb_mmu,
343 [MO_LEUW] = helper_le_lduw_mmu,
344 [MO_LESW] = helper_le_ldsw_mmu,
345 [MO_LEUL] = helper_le_ldul_mmu,
346 [MO_LESL] = helper_le_ldsl_mmu,
347 [MO_LEQ] = helper_le_ldq_mmu,
348 [MO_BEUW] = helper_be_lduw_mmu,
349 [MO_BESW] = helper_be_ldsw_mmu,
350 [MO_BEUL] = helper_be_ldul_mmu,
351 [MO_BESL] = helper_be_ldsl_mmu,
352 [MO_BEQ] = helper_be_ldq_mmu,
355 static void * const qemu_st_helpers[16] = {
356 [MO_UB] = helper_ret_stb_mmu,
357 [MO_LEUW] = helper_le_stw_mmu,
358 [MO_LEUL] = helper_le_stl_mmu,
359 [MO_LEQ] = helper_le_stq_mmu,
360 [MO_BEUW] = helper_be_stw_mmu,
361 [MO_BEUL] = helper_be_stl_mmu,
362 [MO_BEQ] = helper_be_stq_mmu,
364 #endif
366 static tcg_insn_unit *tb_ret_addr;
367 uint64_t s390_facilities;
369 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
370 intptr_t value, intptr_t addend)
372 intptr_t pcrel2;
373 uint32_t old;
375 value += addend;
376 pcrel2 = (tcg_insn_unit *)value - code_ptr;
378 switch (type) {
379 case R_390_PC16DBL:
380 assert(pcrel2 == (int16_t)pcrel2);
381 tcg_patch16(code_ptr, pcrel2);
382 break;
383 case R_390_PC32DBL:
384 assert(pcrel2 == (int32_t)pcrel2);
385 tcg_patch32(code_ptr, pcrel2);
386 break;
387 case R_390_20:
388 assert(value == sextract64(value, 0, 20));
389 old = *(uint32_t *)code_ptr & 0xf00000ff;
390 old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
391 tcg_patch32(code_ptr, old);
392 break;
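/* The R_390_20 case patches the 20-bit displacement of an RSY/RXY insn,
   whose second word keeps the low 12 bits (DL) and the high 8 bits (DH) of
   the displacement in separate fields; the mask 0xf00000ff preserves the
   base-register nibble and the trailing opcode byte while the two shifts
   drop DL and DH into place.  */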
393 default:
394 g_assert_not_reached();
398 /* parse target specific constraints */
399 static const char *target_parse_constraint(TCGArgConstraint *ct,
400 const char *ct_str, TCGType type)
402 switch (*ct_str++) {
403 case 'r': /* all registers */
404 ct->ct |= TCG_CT_REG;
405 ct->u.regs = 0xffff;
406 break;
407 case 'L': /* qemu_ld/st constraint */
408 ct->ct |= TCG_CT_REG;
409 ct->u.regs = 0xffff;
410 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
411 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
412 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
413 break;
414 case 'a': /* force R2 for division */
415 ct->ct |= TCG_CT_REG;
416 ct->u.regs = 0;
417 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
418 break;
419 case 'b': /* force R3 for division */
420 ct->ct |= TCG_CT_REG;
421 ct->u.regs = 0;
422 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
423 break;
424 case 'A':
425 ct->ct |= TCG_CT_CONST_S33;
426 break;
427 case 'I':
428 ct->ct |= TCG_CT_CONST_S16;
429 break;
430 case 'J':
431 ct->ct |= TCG_CT_CONST_S32;
432 break;
433 case 'Z':
434 ct->ct |= TCG_CT_CONST_ZERO;
435 break;
436 default:
437 return NULL;
439 return ct_str;
442 /* Test if a constant matches the constraint. */
443 static int tcg_target_const_match(tcg_target_long val, TCGType type,
444 const TCGArgConstraint *arg_ct)
446 int ct = arg_ct->ct;
448 if (ct & TCG_CT_CONST) {
449 return 1;
452 if (type == TCG_TYPE_I32) {
453 val = (int32_t)val;
456 /* The following are mutually exclusive. */
457 if (ct & TCG_CT_CONST_S16) {
458 return val == (int16_t)val;
459 } else if (ct & TCG_CT_CONST_S32) {
460 return val == (int32_t)val;
461 } else if (ct & TCG_CT_CONST_S33) {
462 return val >= -0xffffffffll && val <= 0xffffffffll;
463 } else if (ct & TCG_CT_CONST_ZERO) {
464 return val == 0;
467 return 0;
470 /* Emit instructions according to the given instruction format. */
472 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
474 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
477 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
478 TCGReg r1, TCGReg r2)
480 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
483 static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
484 TCGReg r1, TCGReg r2, int m3)
486 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
489 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
491 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
494 static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
495 int i2, int m3)
497 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
498 tcg_out32(s, (i2 << 16) | (op & 0xff));
501 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
503 tcg_out16(s, op | (r1 << 4));
504 tcg_out32(s, i2);
507 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
508 TCGReg b2, TCGReg r3, int disp)
510 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
511 | (disp & 0xfff));
514 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
515 TCGReg b2, TCGReg r3, int disp)
517 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
518 tcg_out32(s, (op & 0xff) | (b2 << 28)
519 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
522 #define tcg_out_insn_RX tcg_out_insn_RS
523 #define tcg_out_insn_RXY tcg_out_insn_RSY
525 /* Emit an opcode with "type-checking" of the format. */
526 #define tcg_out_insn(S, FMT, OP, ...) \
527 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
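/* For example, tcg_out_insn(s, RRE, LGR, dest, src) expands to
   tcg_out_insn_RRE(s, RRE_LGR, dest, src); naming an opcode that does not
   exist for the requested format fails to compile instead of silently
   emitting a malformed instruction.  */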
530 /* emit 64-bit shifts */
531 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
532 TCGReg src, TCGReg sh_reg, int sh_imm)
534 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
537 /* emit 32-bit shifts */
538 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
539 TCGReg sh_reg, int sh_imm)
541 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
544 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
546 if (src != dst) {
547 if (type == TCG_TYPE_I32) {
548 tcg_out_insn(s, RR, LR, dst, src);
549 } else {
550 tcg_out_insn(s, RRE, LGR, dst, src);
555 static const S390Opcode lli_insns[4] = {
556 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
559 static bool maybe_out_small_movi(TCGContext *s, TCGType type,
560 TCGReg ret, tcg_target_long sval)
562 tcg_target_ulong uval = sval;
563 int i;
565 if (type == TCG_TYPE_I32) {
566 uval = (uint32_t)sval;
567 sval = (int32_t)sval;
570 /* Try all 32-bit insns that can load it in one go. */
571 if (sval >= -0x8000 && sval < 0x8000) {
572 tcg_out_insn(s, RI, LGHI, ret, sval);
573 return true;
576 for (i = 0; i < 4; i++) {
577 tcg_target_long mask = 0xffffull << i*16;
578 if ((uval & mask) == uval) {
579 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
580 return true;
584 return false;
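/* For example, 0x12340000 is caught by the i == 1 iteration above
   (mask 0xffff0000) and loaded with a single LLILH of 0x1234, while
   0x12345678 fits neither the LGHI range nor any one 16-bit slice, so the
   function returns false and the caller must emit a longer sequence.  */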
587 /* load a register with an immediate value */
588 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
589 tcg_target_long sval, bool in_prologue)
591 tcg_target_ulong uval;
593 /* Try all 32-bit insns that can load it in one go. */
594 if (maybe_out_small_movi(s, type, ret, sval)) {
595 return;
598 uval = sval;
599 if (type == TCG_TYPE_I32) {
600 uval = (uint32_t)sval;
601 sval = (int32_t)sval;
604 /* Try all 48-bit insns that can load it in one go. */
605 if (s390_facilities & FACILITY_EXT_IMM) {
606 if (sval == (int32_t)sval) {
607 tcg_out_insn(s, RIL, LGFI, ret, sval);
608 return;
610 if (uval <= 0xffffffff) {
611 tcg_out_insn(s, RIL, LLILF, ret, uval);
612 return;
614 if ((uval & 0xffffffff) == 0) {
615 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
616 return;
620 /* Try for PC-relative address load. For odd addresses,
621 attempt to use an offset from the start of the TB. */
622 if ((sval & 1) == 0) {
623 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
624 if (off == (int32_t)off) {
625 tcg_out_insn(s, RIL, LARL, ret, off);
626 return;
628 } else if (USE_REG_TB && !in_prologue) {
629 ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr;
630 if (off == sextract64(off, 0, 20)) {
631 /* This is certain to be an address within TB, and therefore
632 OFF will be negative; don't try RX_LA. */
633 tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
634 return;
638 /* A 32-bit unsigned value can be loaded in 2 insns. And given
639 that LLILL, LLIHL, LLILF above did not succeed, we know that
640 both insns are required. */
641 if (uval <= 0xffffffff) {
642 tcg_out_insn(s, RI, LLILL, ret, uval);
643 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
644 return;
647 /* Otherwise, stuff it in the constant pool. */
648 if (s390_facilities & FACILITY_GEN_INST_EXT) {
649 tcg_out_insn(s, RIL, LGRL, ret, 0);
650 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
651 } else if (USE_REG_TB && !in_prologue) {
652 tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
653 new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
654 -(intptr_t)s->code_gen_ptr);
655 } else {
656 TCGReg base = ret ? ret : TCG_TMP0;
657 tcg_out_insn(s, RIL, LARL, base, 0);
658 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
659 tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
663 static void tcg_out_movi(TCGContext *s, TCGType type,
664 TCGReg ret, tcg_target_long sval)
666 tcg_out_movi_int(s, type, ret, sval, false);
669 /* Emit a load/store type instruction. Inputs are:
670 DATA: The register to be loaded or stored.
671 BASE+OFS: The effective address.
672 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
673 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
675 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
676 TCGReg data, TCGReg base, TCGReg index,
677 tcg_target_long ofs)
679 if (ofs < -0x80000 || ofs >= 0x80000) {
680 /* Combine the low 20 bits of the offset with the actual load insn;
681 the high 44 bits must come from an immediate load. */
682 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
683 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
684 ofs = low;
686 /* If we were already given an index register, add it in. */
687 if (index != TCG_REG_NONE) {
688 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
690 index = TCG_TMP0;
693 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
694 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
695 } else {
696 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
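/* For example, ofs = 0x123456 does not fit the signed 20-bit displacement:
   the code above loads 0x100000 into TCG_TMP0 (adding any caller-supplied
   index register to it), and the memory insn is then emitted with TCG_TMP0
   as the index and the remaining displacement 0x23456, which does fit.  */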
701 /* load data without address translation or endianness conversion */
702 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
703 TCGReg base, intptr_t ofs)
705 if (type == TCG_TYPE_I32) {
706 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
707 } else {
708 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
712 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
713 TCGReg base, intptr_t ofs)
715 if (type == TCG_TYPE_I32) {
716 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
717 } else {
718 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
722 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
723 TCGReg base, intptr_t ofs)
725 return false;
728 /* load data from an absolute host address */
729 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
731 intptr_t addr = (intptr_t)abs;
733 if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
734 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
735 if (disp == (int32_t)disp) {
736 if (type == TCG_TYPE_I32) {
737 tcg_out_insn(s, RIL, LRL, dest, disp);
738 } else {
739 tcg_out_insn(s, RIL, LGRL, dest, disp);
741 return;
744 if (USE_REG_TB) {
745 ptrdiff_t disp = abs - (void *)s->code_gen_ptr;
746 if (disp == sextract64(disp, 0, 20)) {
747 tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
748 return;
752 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
753 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
756 static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
757 int msb, int lsb, int ofs, int z)
759 /* Format RIE-f */
760 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
761 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
762 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
765 static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
767 if (s390_facilities & FACILITY_EXT_IMM) {
768 tcg_out_insn(s, RRE, LGBR, dest, src);
769 return;
772 if (type == TCG_TYPE_I32) {
773 if (dest == src) {
774 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
775 } else {
776 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
778 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
779 } else {
780 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
781 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
785 static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
787 if (s390_facilities & FACILITY_EXT_IMM) {
788 tcg_out_insn(s, RRE, LLGCR, dest, src);
789 return;
792 if (dest == src) {
793 tcg_out_movi(s, type, TCG_TMP0, 0xff);
794 src = TCG_TMP0;
795 } else {
796 tcg_out_movi(s, type, dest, 0xff);
798 if (type == TCG_TYPE_I32) {
799 tcg_out_insn(s, RR, NR, dest, src);
800 } else {
801 tcg_out_insn(s, RRE, NGR, dest, src);
805 static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
807 if (s390_facilities & FACILITY_EXT_IMM) {
808 tcg_out_insn(s, RRE, LGHR, dest, src);
809 return;
812 if (type == TCG_TYPE_I32) {
813 if (dest == src) {
814 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
815 } else {
816 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
818 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
819 } else {
820 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
821 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
825 static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
827 if (s390_facilities & FACILITY_EXT_IMM) {
828 tcg_out_insn(s, RRE, LLGHR, dest, src);
829 return;
832 if (dest == src) {
833 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
834 src = TCG_TMP0;
835 } else {
836 tcg_out_movi(s, type, dest, 0xffff);
838 if (type == TCG_TYPE_I32) {
839 tcg_out_insn(s, RR, NR, dest, src);
840 } else {
841 tcg_out_insn(s, RRE, NGR, dest, src);
845 static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
847 tcg_out_insn(s, RRE, LGFR, dest, src);
850 static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
852 tcg_out_insn(s, RRE, LLGFR, dest, src);
855 /* Accept bit patterns like these:
856 0....01....1
857 1....10....0
858 1..10..01..1
859 0..01..10..0
860 Copied from gcc sources. */
861 static inline bool risbg_mask(uint64_t c)
863 uint64_t lsb;
864 /* We don't change the number of transitions by inverting,
865 so make sure we start with the LSB zero. */
866 if (c & 1) {
867 c = ~c;
869 /* Reject all zeros or all ones. */
870 if (c == 0) {
871 return false;
873 /* Find the first transition. */
874 lsb = c & -c;
875 /* Invert to look for a second transition. */
876 c = ~c;
877 /* Erase the first transition. */
878 c &= -lsb;
879 /* Find the second transition, if any. */
880 lsb = c & -c;
881 /* Match if all the bits are 1's, or if c is zero. */
882 return c == -lsb;
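/* For example, 0x0000000000000ff0 (one contiguous run of ones) and
   0xf00000000000000f (a run wrapping around bits 63/0) are accepted,
   while 0x0f0f is rejected because it contains two separate runs.  */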
885 static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
887 int msb, lsb;
888 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
889 /* Achieve wraparound by swapping msb and lsb. */
890 msb = 64 - ctz64(~val);
891 lsb = clz64(~val) - 1;
892 } else {
893 msb = clz64(val);
894 lsb = 63 - ctz64(val);
896 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
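/* For example, val = 0x00000000ffffff00 takes the else branch:
   msb = clz64(val) = 32 and lsb = 63 - ctz64(val) = 55, so the RISBG keeps
   bits 32..55 in IBM (big-endian) bit numbering -- exactly the 24 one-bits
   of the mask -- and zeroes everything else (z = 1).  */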
899 static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
901 static const S390Opcode ni_insns[4] = {
902 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
904 static const S390Opcode nif_insns[2] = {
905 RIL_NILF, RIL_NIHF
907 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
908 int i;
910 /* Look for the zero-extensions. */
911 if ((val & valid) == 0xffffffff) {
912 tgen_ext32u(s, dest, dest);
913 return;
915 if (s390_facilities & FACILITY_EXT_IMM) {
916 if ((val & valid) == 0xff) {
917 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
918 return;
920 if ((val & valid) == 0xffff) {
921 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
922 return;
926 /* Try all 32-bit insns that can perform it in one go. */
927 for (i = 0; i < 4; i++) {
928 tcg_target_ulong mask = ~(0xffffull << i*16);
929 if (((val | ~valid) & mask) == mask) {
930 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
931 return;
935 /* Try all 48-bit insns that can perform it in one go. */
936 if (s390_facilities & FACILITY_EXT_IMM) {
937 for (i = 0; i < 2; i++) {
938 tcg_target_ulong mask = ~(0xffffffffull << i*32);
939 if (((val | ~valid) & mask) == mask) {
940 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
941 return;
945 if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
946 tgen_andi_risbg(s, dest, dest, val);
947 return;
950 /* Use the constant pool if USE_REG_TB, but not for small constants. */
951 if (USE_REG_TB) {
952 if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
953 tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
954 new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
955 -(intptr_t)s->code_gen_ptr);
956 return;
958 } else {
959 tcg_out_movi(s, type, TCG_TMP0, val);
961 if (type == TCG_TYPE_I32) {
962 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
963 } else {
964 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
968 static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
970 static const S390Opcode oi_insns[4] = {
971 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
973 static const S390Opcode oif_insns[2] = {
974 RIL_OILF, RIL_OIHF
977 int i;
979 /* Look for no-op. */
980 if (unlikely(val == 0)) {
981 return;
984 /* Try all 32-bit insns that can perform it in one go. */
985 for (i = 0; i < 4; i++) {
986 tcg_target_ulong mask = (0xffffull << i*16);
987 if ((val & mask) != 0 && (val & ~mask) == 0) {
988 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
989 return;
993 /* Try all 48-bit insns that can perform it in one go. */
994 if (s390_facilities & FACILITY_EXT_IMM) {
995 for (i = 0; i < 2; i++) {
996 tcg_target_ulong mask = (0xffffffffull << i*32);
997 if ((val & mask) != 0 && (val & ~mask) == 0) {
998 tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
999 return;
1004 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1005 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
1006 if (type == TCG_TYPE_I32) {
1007 tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
1008 } else {
1009 tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
1011 } else if (USE_REG_TB) {
1012 tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
1013 new_pool_label(s, val, R_390_20, s->code_ptr - 2,
1014 -(intptr_t)s->code_gen_ptr);
1015 } else {
1016 /* Perform the OR via sequential modifications to the high and
1017 low parts. Do this via recursion to handle 16-bit vs 32-bit
1018 masks in each half. */
1019 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
1020 tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
1021 tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
1025 static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
1027 /* Try all 48-bit insns that can perform it in one go. */
1028 if (s390_facilities & FACILITY_EXT_IMM) {
1029 if ((val & 0xffffffff00000000ull) == 0) {
1030 tcg_out_insn(s, RIL, XILF, dest, val);
1031 return;
1033 if ((val & 0x00000000ffffffffull) == 0) {
1034 tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1035 return;
1039 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1040 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
1041 if (type == TCG_TYPE_I32) {
1042 tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
1043 } else {
1044 tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
1046 } else if (USE_REG_TB) {
1047 tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
1048 new_pool_label(s, val, R_390_20, s->code_ptr - 2,
1049 -(intptr_t)s->code_gen_ptr);
1050 } else {
1051 /* Perform the xor by parts. */
1052 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
1053 if (val & 0xffffffff) {
1054 tcg_out_insn(s, RIL, XILF, dest, val);
1056 if (val > 0xffffffff) {
1057 tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1062 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1063 TCGArg c2, bool c2const, bool need_carry)
1065 bool is_unsigned = is_unsigned_cond(c);
1066 S390Opcode op;
1068 if (c2const) {
1069 if (c2 == 0) {
1070 if (!(is_unsigned && need_carry)) {
1071 if (type == TCG_TYPE_I32) {
1072 tcg_out_insn(s, RR, LTR, r1, r1);
1073 } else {
1074 tcg_out_insn(s, RRE, LTGR, r1, r1);
1076 return tcg_cond_to_ltr_cond[c];
1080 if (!is_unsigned && c2 == (int16_t)c2) {
1081 op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
1082 tcg_out_insn_RI(s, op, r1, c2);
1083 goto exit;
1086 if (s390_facilities & FACILITY_EXT_IMM) {
1087 if (type == TCG_TYPE_I32) {
1088 op = (is_unsigned ? RIL_CLFI : RIL_CFI);
1089 tcg_out_insn_RIL(s, op, r1, c2);
1090 goto exit;
1091 } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) {
1092 op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
1093 tcg_out_insn_RIL(s, op, r1, c2);
1094 goto exit;
1098 /* Use the constant pool, but not for small constants. */
1099 if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
1100 c2 = TCG_TMP0;
1101 /* fall through to reg-reg */
1102 } else if (USE_REG_TB) {
1103 if (type == TCG_TYPE_I32) {
1104 op = (is_unsigned ? RXY_CLY : RXY_CY);
1105 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
1106 new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
1107 4 - (intptr_t)s->code_gen_ptr);
1108 } else {
1109 op = (is_unsigned ? RXY_CLG : RXY_CG);
1110 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
1111 new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
1112 -(intptr_t)s->code_gen_ptr);
1114 goto exit;
1115 } else {
1116 if (type == TCG_TYPE_I32) {
1117 op = (is_unsigned ? RIL_CLRL : RIL_CRL);
1118 tcg_out_insn_RIL(s, op, r1, 0);
1119 new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
1120 s->code_ptr - 2, 2 + 4);
1121 } else {
1122 op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
1123 tcg_out_insn_RIL(s, op, r1, 0);
1124 new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
1126 goto exit;
1130 if (type == TCG_TYPE_I32) {
1131 op = (is_unsigned ? RR_CLR : RR_CR);
1132 tcg_out_insn_RR(s, op, r1, c2);
1133 } else {
1134 op = (is_unsigned ? RRE_CLGR : RRE_CGR);
1135 tcg_out_insn_RRE(s, op, r1, c2);
1138 exit:
1139 return tcg_cond_to_s390_cond[c];
1142 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
1143 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1145 int cc;
1146 bool have_loc;
1148 /* With LOC2, we can always emit the minimum 3 insns. */
1149 if (s390_facilities & FACILITY_LOAD_ON_COND2) {
1150 /* Emit: d = 0, d = (cc ? 1 : d). */
1151 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1152 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1153 tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
1154 return;
1157 have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
1159 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
1160 restart:
1161 switch (cond) {
1162 case TCG_COND_NE:
1163 /* X != 0 is X > 0. */
1164 if (c2const && c2 == 0) {
1165 cond = TCG_COND_GTU;
1166 } else {
1167 break;
1169 /* fallthru */
1171 case TCG_COND_GTU:
1172 case TCG_COND_GT:
1173 /* The result of a compare has CC=2 for GT and CC=3 unused.
1174 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
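/* Concretely: after the compare, CC=2 is set exactly when c1 > c2, so the
   ALCGR below computes dest = 0 + 0 + carry, leaving 1 for "greater" and
   0 otherwise, without a branch.  */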
1175 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1176 tcg_out_movi(s, type, dest, 0);
1177 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1178 return;
1180 case TCG_COND_EQ:
1181 /* X == 0 is X <= 0. */
1182 if (c2const && c2 == 0) {
1183 cond = TCG_COND_LEU;
1184 } else {
1185 break;
1187 /* fallthru */
1189 case TCG_COND_LEU:
1190 case TCG_COND_LE:
1191 /* As above, but we're looking for borrow, or !carry.
1192 The second insn computes d - d - borrow, or -1 for true
1193 and 0 for false. So we must mask to 1 bit afterward. */
1194 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1195 tcg_out_insn(s, RRE, SLBGR, dest, dest);
1196 tgen_andi(s, type, dest, 1);
1197 return;
1199 case TCG_COND_GEU:
1200 case TCG_COND_LTU:
1201 case TCG_COND_LT:
1202 case TCG_COND_GE:
1203 /* Swap operands so that we can use LEU/GTU/GT/LE. */
1204 if (c2const) {
1205 if (have_loc) {
1206 break;
1208 tcg_out_movi(s, type, TCG_TMP0, c2);
1209 c2 = c1;
1210 c2const = 0;
1211 c1 = TCG_TMP0;
1212 } else {
1213 TCGReg t = c1;
1214 c1 = c2;
1215 c2 = t;
1217 cond = tcg_swap_cond(cond);
1218 goto restart;
1220 default:
1221 g_assert_not_reached();
1224 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1225 if (have_loc) {
1226 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1227 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1228 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1229 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1230 } else {
1231 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1232 tcg_out_movi(s, type, dest, 1);
1233 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1234 tcg_out_movi(s, type, dest, 0);
1238 static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1239 TCGReg c1, TCGArg c2, int c2const,
1240 TCGArg v3, int v3const)
1242 int cc;
1243 if (s390_facilities & FACILITY_LOAD_ON_COND) {
1244 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
1245 if (v3const) {
1246 tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
1247 } else {
1248 tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
1250 } else {
1251 c = tcg_invert_cond(c);
1252 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
1254 /* Emit: if (cc) goto over; dest = r3; over: */
1255 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1256 tcg_out_insn(s, RRE, LGR, dest, v3);
1260 static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1261 TCGArg a2, int a2const)
1263 /* Since this sets both R and R+1, we have no choice but to store the
1264 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1265 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1266 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1268 if (a2const && a2 == 64) {
1269 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1270 } else {
1271 if (a2const) {
1272 tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
1273 } else {
1274 tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
1276 if (s390_facilities & FACILITY_LOAD_ON_COND) {
1277 /* Emit: if (one bit found) dest = r0. */
1278 tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
1279 } else {
1280 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1281 tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
1282 tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
1287 static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1288 int ofs, int len, int z)
1290 int lsb = (63 - ofs);
1291 int msb = lsb - (len - 1);
1292 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
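/* For example, depositing a 16-bit field at ofs = 8 gives lsb = 55 and
   msb = 40: RISBG rotates SRC left by 8 and inserts IBM bits 40..55 into
   DEST, i.e. the low 16 bits of SRC land in bits 8..23 of DEST, with the
   other bits of DEST preserved (z = 0) or cleared (z = 1).  */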
1295 static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1296 int ofs, int len)
1298 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1301 static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
1303 ptrdiff_t off = dest - s->code_ptr;
1304 if (off == (int16_t)off) {
1305 tcg_out_insn(s, RI, BRC, cc, off);
1306 } else if (off == (int32_t)off) {
1307 tcg_out_insn(s, RIL, BRCL, cc, off);
1308 } else {
1309 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1310 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1314 static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
1316 if (l->has_value) {
1317 tgen_gotoi(s, cc, l->u.value_ptr);
1318 } else if (USE_LONG_BRANCHES) {
1319 tcg_out16(s, RIL_BRCL | (cc << 4));
1320 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
1321 s->code_ptr += 2;
1322 } else {
1323 tcg_out16(s, RI_BRC | (cc << 4));
1324 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
1325 s->code_ptr += 1;
1329 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1330 TCGReg r1, TCGReg r2, TCGLabel *l)
1332 intptr_t off;
1334 if (l->has_value) {
1335 off = l->u.value_ptr - s->code_ptr;
1336 } else {
1337 /* We need to keep the offset unchanged for retranslation. */
1338 off = s->code_ptr[1];
1339 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1342 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1343 tcg_out16(s, off);
1344 tcg_out16(s, cc << 12 | (opc & 0xff));
1347 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1348 TCGReg r1, int i2, TCGLabel *l)
1350 tcg_target_long off;
1352 if (l->has_value) {
1353 off = l->u.value_ptr - s->code_ptr;
1354 } else {
1355 /* We need to keep the offset unchanged for retranslation. */
1356 off = s->code_ptr[1];
1357 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1360 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1361 tcg_out16(s, off);
1362 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1365 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1366 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1368 int cc;
1370 if (s390_facilities & FACILITY_GEN_INST_EXT) {
1371 bool is_unsigned = is_unsigned_cond(c);
1372 bool in_range;
1373 S390Opcode opc;
1375 cc = tcg_cond_to_s390_cond[c];
1377 if (!c2const) {
1378 opc = (type == TCG_TYPE_I32
1379 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1380 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1381 tgen_compare_branch(s, opc, cc, r1, c2, l);
1382 return;
1385 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1386 If the immediate we've been given does not fit that range, we'll
1387 fall back to separate compare and branch instructions using the
1388 larger comparison range afforded by COMPARE IMMEDIATE. */
1389 if (type == TCG_TYPE_I32) {
1390 if (is_unsigned) {
1391 opc = RIE_CLIJ;
1392 in_range = (uint32_t)c2 == (uint8_t)c2;
1393 } else {
1394 opc = RIE_CIJ;
1395 in_range = (int32_t)c2 == (int8_t)c2;
1397 } else {
1398 if (is_unsigned) {
1399 opc = RIE_CLGIJ;
1400 in_range = (uint64_t)c2 == (uint8_t)c2;
1401 } else {
1402 opc = RIE_CGIJ;
1403 in_range = (int64_t)c2 == (int8_t)c2;
1406 if (in_range) {
1407 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1408 return;
1412 cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
1413 tgen_branch(s, cc, l);
1416 static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1418 ptrdiff_t off = dest - s->code_ptr;
1419 if (off == (int32_t)off) {
1420 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1421 } else {
1422 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1423 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1427 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1428 TCGReg base, TCGReg index, int disp)
1430 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1431 case MO_UB:
1432 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1433 break;
1434 case MO_SB:
1435 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1436 break;
1438 case MO_UW | MO_BSWAP:
1439 /* swapped unsigned halfword load with upper bits zeroed */
1440 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1441 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1442 break;
1443 case MO_UW:
1444 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1445 break;
1447 case MO_SW | MO_BSWAP:
1448 /* swapped sign-extended halfword load */
1449 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1450 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1451 break;
1452 case MO_SW:
1453 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1454 break;
1456 case MO_UL | MO_BSWAP:
1457 /* swapped unsigned int load with upper bits zeroed */
1458 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1459 tgen_ext32u(s, data, data);
1460 break;
1461 case MO_UL:
1462 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1463 break;
1465 case MO_SL | MO_BSWAP:
1466 /* swapped sign-extended int load */
1467 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1468 tgen_ext32s(s, data, data);
1469 break;
1470 case MO_SL:
1471 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1472 break;
1474 case MO_Q | MO_BSWAP:
1475 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1476 break;
1477 case MO_Q:
1478 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1479 break;
1481 default:
1482 tcg_abort();
1486 static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1487 TCGReg base, TCGReg index, int disp)
1489 switch (opc & (MO_SIZE | MO_BSWAP)) {
1490 case MO_UB:
1491 if (disp >= 0 && disp < 0x1000) {
1492 tcg_out_insn(s, RX, STC, data, base, index, disp);
1493 } else {
1494 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1496 break;
1498 case MO_UW | MO_BSWAP:
1499 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1500 break;
1501 case MO_UW:
1502 if (disp >= 0 && disp < 0x1000) {
1503 tcg_out_insn(s, RX, STH, data, base, index, disp);
1504 } else {
1505 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1507 break;
1509 case MO_UL | MO_BSWAP:
1510 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1511 break;
1512 case MO_UL:
1513 if (disp >= 0 && disp < 0x1000) {
1514 tcg_out_insn(s, RX, ST, data, base, index, disp);
1515 } else {
1516 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1518 break;
1520 case MO_Q | MO_BSWAP:
1521 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1522 break;
1523 case MO_Q:
1524 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1525 break;
1527 default:
1528 tcg_abort();
1532 #if defined(CONFIG_SOFTMMU)
1533 #include "tcg-ldst.inc.c"
1535 /* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1536 Using the offset of the second entry in the last tlb table ensures
1537 that we can index all of the elements of the first entry. */
1538 QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1539 > 0x7ffff);
1541 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1542 addend into R2. Returns a register with the sanitized guest address. */
1543 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
1544 int mem_index, bool is_ld)
1546 unsigned s_bits = opc & MO_SIZE;
1547 unsigned a_bits = get_alignment_bits(opc);
1548 unsigned s_mask = (1 << s_bits) - 1;
1549 unsigned a_mask = (1 << a_bits) - 1;
1550 int ofs, a_off;
1551 uint64_t tlb_mask;
1553 /* For aligned accesses, we check the first byte and include the alignment
1554 bits within the address. For unaligned access, we check that we don't
1555 cross pages using the address of the last byte of the access. */
1556 a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
1557 tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
1559 if (s390_facilities & FACILITY_GEN_INST_EXT) {
1560 tcg_out_risbg(s, TCG_REG_R2, addr_reg,
1561 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
1562 63 - CPU_TLB_ENTRY_BITS,
1563 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
1564 if (a_off) {
1565 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1566 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1567 } else {
1568 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
1570 } else {
1571 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1572 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1573 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1574 tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
1575 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1576 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1579 if (is_ld) {
1580 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1581 } else {
1582 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1584 if (TARGET_LONG_BITS == 32) {
1585 tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1586 } else {
1587 tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1590 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1591 tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
1593 if (TARGET_LONG_BITS == 32) {
1594 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1595 return TCG_REG_R3;
1597 return addr_reg;
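/* At this point the condition code still reflects the comparison of the
   masked page address in R3 with the TLB entry's addr_read/addr_write
   field (LG does not change the CC), R2 holds the addend loaded from the
   same entry, and the returned register holds the guest address
   (zero-extended for 32-bit targets).  The callers below branch to the
   slow path on inequality and otherwise use R2 as the index register for
   the direct access.  */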
1600 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1601 TCGReg data, TCGReg addr,
1602 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1604 TCGLabelQemuLdst *label = new_ldst_label(s);
1606 label->is_ld = is_ld;
1607 label->oi = oi;
1608 label->datalo_reg = data;
1609 label->addrlo_reg = addr;
1610 label->raddr = raddr;
1611 label->label_ptr[0] = label_ptr;
1614 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1616 TCGReg addr_reg = lb->addrlo_reg;
1617 TCGReg data_reg = lb->datalo_reg;
1618 TCGMemOpIdx oi = lb->oi;
1619 TCGMemOp opc = get_memop(oi);
1621 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2);
1623 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1624 if (TARGET_LONG_BITS == 64) {
1625 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1627 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
1628 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
1629 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
1630 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1632 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1635 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1637 TCGReg addr_reg = lb->addrlo_reg;
1638 TCGReg data_reg = lb->datalo_reg;
1639 TCGMemOpIdx oi = lb->oi;
1640 TCGMemOp opc = get_memop(oi);
1642 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2);
1644 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1645 if (TARGET_LONG_BITS == 64) {
1646 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1648 switch (opc & MO_SIZE) {
1649 case MO_UB:
1650 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1651 break;
1652 case MO_UW:
1653 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1654 break;
1655 case MO_UL:
1656 tgen_ext32u(s, TCG_REG_R4, data_reg);
1657 break;
1658 case MO_Q:
1659 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1660 break;
1661 default:
1662 tcg_abort();
1664 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
1665 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
1666 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1668 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1670 #else
1671 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1672 TCGReg *index_reg, tcg_target_long *disp)
1674 if (TARGET_LONG_BITS == 32) {
1675 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1676 *addr_reg = TCG_TMP0;
1678 if (guest_base < 0x80000) {
1679 *index_reg = TCG_REG_NONE;
1680 *disp = guest_base;
1681 } else {
1682 *index_reg = TCG_GUEST_BASE_REG;
1683 *disp = 0;
1686 #endif /* CONFIG_SOFTMMU */
1688 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1689 TCGMemOpIdx oi)
1691 TCGMemOp opc = get_memop(oi);
1692 #ifdef CONFIG_SOFTMMU
1693 unsigned mem_index = get_mmuidx(oi);
1694 tcg_insn_unit *label_ptr;
1695 TCGReg base_reg;
1697 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1699 /* We need to keep the offset unchanged for retranslation. */
1700 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1701 label_ptr = s->code_ptr;
1702 s->code_ptr += 1;
1704 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
1706 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
1707 #else
1708 TCGReg index_reg;
1709 tcg_target_long disp;
1711 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1712 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1713 #endif
1716 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1717 TCGMemOpIdx oi)
1719 TCGMemOp opc = get_memop(oi);
1720 #ifdef CONFIG_SOFTMMU
1721 unsigned mem_index = get_mmuidx(oi);
1722 tcg_insn_unit *label_ptr;
1723 TCGReg base_reg;
1725 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1727 /* We need to keep the offset unchanged for retranslation. */
1728 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1729 label_ptr = s->code_ptr;
1730 s->code_ptr += 1;
1732 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
1734 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
1735 #else
1736 TCGReg index_reg;
1737 tcg_target_long disp;
1739 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1740 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1741 #endif
1744 # define OP_32_64(x) \
1745 case glue(glue(INDEX_op_,x),_i32): \
1746 case glue(glue(INDEX_op_,x),_i64)
1748 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1749 const TCGArg *args, const int *const_args)
1751 S390Opcode op, op2;
1752 TCGArg a0, a1, a2;
1754 switch (opc) {
1755 case INDEX_op_exit_tb:
1756 /* Reuse the zeroing that exists for goto_ptr. */
1757 a0 = args[0];
1758 if (a0 == 0) {
1759 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
1760 } else {
1761 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1762 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1764 break;
1766 case INDEX_op_goto_tb:
1767 a0 = args[0];
1768 if (s->tb_jmp_insn_offset) {
1769 /* branch displacement must be aligned for atomic patching;
1770 * see if we need to add extra nop before branch
1772 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1773 tcg_out16(s, NOP);
1775 tcg_debug_assert(!USE_REG_TB);
1776 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1777 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1778 s->code_ptr += 2;
1779 } else {
1780 /* load address stored at s->tb_jmp_target_addr + a0 */
1781 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
1782 s->tb_jmp_target_addr + a0);
1783 /* and go there */
1784 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
1786 set_jmp_reset_offset(s, a0);
1788 /* For the unlinked path of goto_tb, we need to reset
1789 TCG_REG_TB to the beginning of this TB. */
1790 if (USE_REG_TB) {
1791 int ofs = -tcg_current_code_size(s);
1792 assert(ofs == (int16_t)ofs);
1793 tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs);
1795 break;
1797 case INDEX_op_goto_ptr:
1798 a0 = args[0];
1799 if (USE_REG_TB) {
1800 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
1802 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
1803 break;
1805 OP_32_64(ld8u):
1806 /* ??? LLC (RXY format) is only present with the extended-immediate
1807 facility, whereas LLGC is always present. */
1808 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1809 break;
1811 OP_32_64(ld8s):
1812 /* ??? LB is no smaller than LGB, so no point in using it. */
1813 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1814 break;
1816 OP_32_64(ld16u):
1817 /* ??? LLH (RXY format) is only present with the extended-immediate
1818 facility, whereas LLGH is always present. */
1819 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1820 break;
1822 case INDEX_op_ld16s_i32:
1823 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1824 break;
1826 case INDEX_op_ld_i32:
1827 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1828 break;
1830 OP_32_64(st8):
1831 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1832 TCG_REG_NONE, args[2]);
1833 break;
1835 OP_32_64(st16):
1836 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1837 TCG_REG_NONE, args[2]);
1838 break;
1840 case INDEX_op_st_i32:
1841 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1842 break;
1844 case INDEX_op_add_i32:
1845 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1846 if (const_args[2]) {
1847 do_addi_32:
1848 if (a0 == a1) {
1849 if (a2 == (int16_t)a2) {
1850 tcg_out_insn(s, RI, AHI, a0, a2);
1851 break;
1853 if (s390_facilities & FACILITY_EXT_IMM) {
1854 tcg_out_insn(s, RIL, AFI, a0, a2);
1855 break;
1858 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1859 } else if (a0 == a1) {
1860 tcg_out_insn(s, RR, AR, a0, a2);
1861 } else {
1862 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1864 break;
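/* Note that both non-destructive forms above fall back on LOAD ADDRESS,
   reusing the address-generation adder to compute a1 plus a register or
   constant into a0 without clobbering a1 or touching the condition code;
   add_i64 below plays the same trick.  */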
1865 case INDEX_op_sub_i32:
1866 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1867 if (const_args[2]) {
1868 a2 = -a2;
1869 goto do_addi_32;
1870 } else if (a0 == a1) {
1871 tcg_out_insn(s, RR, SR, a0, a2);
1872 } else {
1873 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
1875 break;
1877 case INDEX_op_and_i32:
1878 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1879 if (const_args[2]) {
1880 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1881 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1882 } else if (a0 == a1) {
1883 tcg_out_insn(s, RR, NR, a0, a2);
1884 } else {
1885 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
1887 break;
1888 case INDEX_op_or_i32:
1889 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1890 if (const_args[2]) {
1891 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1892 tgen_ori(s, TCG_TYPE_I32, a0, a2);
1893 } else if (a0 == a1) {
1894 tcg_out_insn(s, RR, OR, a0, a2);
1895 } else {
1896 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
1898 break;
1899 case INDEX_op_xor_i32:
1900 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1901 if (const_args[2]) {
1902 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1903 tgen_xori(s, TCG_TYPE_I32, a0, a2);
1904 } else if (a0 == a1) {
1905 tcg_out_insn(s, RR, XR, args[0], args[2]);
1906 } else {
1907 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
1909 break;
1911 case INDEX_op_neg_i32:
1912 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1913 break;
1915 case INDEX_op_mul_i32:
1916 if (const_args[2]) {
1917 if ((int32_t)args[2] == (int16_t)args[2]) {
1918 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1919 } else {
1920 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1922 } else {
1923 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1925 break;
1927 case INDEX_op_div2_i32:
1928 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1929 break;
1930 case INDEX_op_divu2_i32:
1931 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1932 break;
1934 case INDEX_op_shl_i32:
1935 op = RS_SLL;
1936 op2 = RSY_SLLK;
1937 do_shift32:
1938 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1939 if (a0 == a1) {
1940 if (const_args[2]) {
1941 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1942 } else {
1943 tcg_out_sh32(s, op, a0, a2, 0);
1945 } else {
1946 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1947 if (const_args[2]) {
1948 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1949 } else {
1950 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1953 break;
1954 case INDEX_op_shr_i32:
1955 op = RS_SRL;
1956 op2 = RSY_SRLK;
1957 goto do_shift32;
1958 case INDEX_op_sar_i32:
1959 op = RS_SRA;
1960 op2 = RSY_SRAK;
1961 goto do_shift32;
1963 case INDEX_op_rotl_i32:
1964 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1965 if (const_args[2]) {
1966 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1967 } else {
1968 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1970 break;
1971 case INDEX_op_rotr_i32:
1972 if (const_args[2]) {
1973 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1974 TCG_REG_NONE, (32 - args[2]) & 31);
1975 } else {
1976 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1977 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1979 break;
1981 case INDEX_op_ext8s_i32:
1982 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1983 break;
1984 case INDEX_op_ext16s_i32:
1985 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1986 break;
1987 case INDEX_op_ext8u_i32:
1988 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1989 break;
1990 case INDEX_op_ext16u_i32:
1991 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1992 break;
1994 OP_32_64(bswap16):
1995 /* The TCG bswap definition requires bits 0-47 already be zero.
1996 Thus we don't need the G-type insns to implement bswap16_i64. */
1997 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1998 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1999 break;
2000 OP_32_64(bswap32):
2001 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2002 break;
2004 case INDEX_op_add2_i32:
2005 if (const_args[4]) {
2006 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2007 } else {
2008 tcg_out_insn(s, RR, ALR, args[0], args[4]);
2010 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2011 break;
2012 case INDEX_op_sub2_i32:
2013 if (const_args[4]) {
2014 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2015 } else {
2016 tcg_out_insn(s, RR, SLR, args[0], args[4]);
2018 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2019 break;
2021 case INDEX_op_br:
2022 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2023 break;
2025 case INDEX_op_brcond_i32:
2026 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2027 args[1], const_args[1], arg_label(args[3]));
2028 break;
2029 case INDEX_op_setcond_i32:
2030 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2031 args[2], const_args[2]);
2032 break;
2033 case INDEX_op_movcond_i32:
2034 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2035 args[2], const_args[2], args[3], const_args[3]);
2036 break;
2038 case INDEX_op_qemu_ld_i32:
2039 /* ??? Technically we can use a non-extending instruction. */
2040 case INDEX_op_qemu_ld_i64:
2041 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
2042 break;
2043 case INDEX_op_qemu_st_i32:
2044 case INDEX_op_qemu_st_i64:
2045 tcg_out_qemu_st(s, args[0], args[1], args[2]);
2046 break;
2048 case INDEX_op_ld16s_i64:
2049 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2050 break;
2051 case INDEX_op_ld32u_i64:
2052 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2053 break;
2054 case INDEX_op_ld32s_i64:
2055 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2056 break;
2057 case INDEX_op_ld_i64:
2058 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2059 break;
2061 case INDEX_op_st32_i64:
2062 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2063 break;
2064 case INDEX_op_st_i64:
2065 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2066 break;
2068 case INDEX_op_add_i64:
2069 a0 = args[0], a1 = args[1], a2 = args[2];
2070 if (const_args[2]) {
2071 do_addi_64:
2072 if (a0 == a1) {
2073 if (a2 == (int16_t)a2) {
2074 tcg_out_insn(s, RI, AGHI, a0, a2);
2075 break;
2076 }
2077 if (s390_facilities & FACILITY_EXT_IMM) {
2078 if (a2 == (int32_t)a2) {
2079 tcg_out_insn(s, RIL, AGFI, a0, a2);
2080 break;
2081 } else if (a2 == (uint32_t)a2) {
2082 tcg_out_insn(s, RIL, ALGFI, a0, a2);
2083 break;
2084 } else if (-a2 == (uint32_t)-a2) {
2085 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2086 break;
2087 }
2088 }
2089 }
2090 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2091 } else if (a0 == a1) {
2092 tcg_out_insn(s, RRE, AGR, a0, a2);
2093 } else {
2094 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2095 }
2096 break;
2097 case INDEX_op_sub_i64:
2098 a0 = args[0], a1 = args[1], a2 = args[2];
2099 if (const_args[2]) {
2100 a2 = -a2;
2101 goto do_addi_64;
2102 } else if (a0 == a1) {
2103 tcg_out_insn(s, RRE, SGR, a0, a2);
2104 } else {
2105 tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
2106 }
2107 break;
2109 case INDEX_op_and_i64:
2110 a0 = args[0], a1 = args[1], a2 = args[2];
2111 if (const_args[2]) {
2112 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2113 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2114 } else if (a0 == a1) {
2115 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2116 } else {
2117 tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
2118 }
2119 break;
2120 case INDEX_op_or_i64:
2121 a0 = args[0], a1 = args[1], a2 = args[2];
2122 if (const_args[2]) {
2123 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2124 tgen_ori(s, TCG_TYPE_I64, a0, a2);
2125 } else if (a0 == a1) {
2126 tcg_out_insn(s, RRE, OGR, a0, a2);
2127 } else {
2128 tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
2129 }
2130 break;
2131 case INDEX_op_xor_i64:
2132 a0 = args[0], a1 = args[1], a2 = args[2];
2133 if (const_args[2]) {
2134 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2135 tgen_xori(s, TCG_TYPE_I64, a0, a2);
2136 } else if (a0 == a1) {
2137 tcg_out_insn(s, RRE, XGR, a0, a2);
2138 } else {
2139 tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
2140 }
2141 break;
2143 case INDEX_op_neg_i64:
2144 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2145 break;
2146 case INDEX_op_bswap64_i64:
2147 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2148 break;
2150 case INDEX_op_mul_i64:
2151 if (const_args[2]) {
2152 if (args[2] == (int16_t)args[2]) {
2153 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2154 } else {
2155 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2156 }
2157 } else {
2158 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2159 }
2160 break;
2162 case INDEX_op_div2_i64:
2163 /* ??? We get an unnecessary sign-extension of the dividend
2164 into R3 with this definition, but since we always produce both
2165 quotient and remainder, using INDEX_op_div_i64 instead would
2166 require jumping through even more hoops. */
2167 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2168 break;
2169 case INDEX_op_divu2_i64:
2170 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2171 break;
2172 case INDEX_op_mulu2_i64:
2173 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2174 break;
2176 case INDEX_op_shl_i64:
2177 op = RSY_SLLG;
2178 do_shift64:
2179 if (const_args[2]) {
2180 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2181 } else {
2182 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2183 }
2184 break;
2185 case INDEX_op_shr_i64:
2186 op = RSY_SRLG;
2187 goto do_shift64;
2188 case INDEX_op_sar_i64:
2189 op = RSY_SRAG;
2190 goto do_shift64;
2192 case INDEX_op_rotl_i64:
2193 if (const_args[2]) {
2194 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2195 TCG_REG_NONE, args[2]);
2196 } else {
2197 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2198 }
2199 break;
2200 case INDEX_op_rotr_i64:
2201 if (const_args[2]) {
2202 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2203 TCG_REG_NONE, (64 - args[2]) & 63);
2204 } else {
2205 /* We can use the smaller 32-bit negate because only the
2206 low 6 bits are examined for the rotate. */
2207 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2208 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2209 }
2210 break;
2212 case INDEX_op_ext8s_i64:
2213 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2214 break;
2215 case INDEX_op_ext16s_i64:
2216 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2217 break;
2218 case INDEX_op_ext_i32_i64:
2219 case INDEX_op_ext32s_i64:
2220 tgen_ext32s(s, args[0], args[1]);
2221 break;
2222 case INDEX_op_ext8u_i64:
2223 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2224 break;
2225 case INDEX_op_ext16u_i64:
2226 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2227 break;
2228 case INDEX_op_extu_i32_i64:
2229 case INDEX_op_ext32u_i64:
2230 tgen_ext32u(s, args[0], args[1]);
2231 break;
2233 case INDEX_op_add2_i64:
2234 if (const_args[4]) {
2235 if ((int64_t)args[4] >= 0) {
2236 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2237 } else {
2238 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2239 }
2240 } else {
2241 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2242 }
2243 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2244 break;
2245 case INDEX_op_sub2_i64:
2246 if (const_args[4]) {
2247 if ((int64_t)args[4] >= 0) {
2248 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2249 } else {
2250 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2251 }
2252 } else {
2253 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2254 }
2255 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2256 break;
2258 case INDEX_op_brcond_i64:
2259 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2260 args[1], const_args[1], arg_label(args[3]));
2261 break;
2262 case INDEX_op_setcond_i64:
2263 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2264 args[2], const_args[2]);
2265 break;
2266 case INDEX_op_movcond_i64:
2267 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2268 args[2], const_args[2], args[3], const_args[3]);
2269 break;
2271 OP_32_64(deposit):
2272 a0 = args[0], a1 = args[1], a2 = args[2];
2273 if (const_args[1]) {
2274 tgen_deposit(s, a0, a2, args[3], args[4], 1);
2275 } else {
2276 /* Since we can't support "0Z" as a constraint, we allow a1 in
2277 any register. Fix things up as if it were a matching constraint. */
2278 if (a0 != a1) {
2279 TCGType type = (opc == INDEX_op_deposit_i64);
2280 if (a0 == a2) {
2281 tcg_out_mov(s, type, TCG_TMP0, a2);
2282 a2 = TCG_TMP0;
2283 }
2284 tcg_out_mov(s, type, a0, a1);
2285 }
2286 tgen_deposit(s, a0, a2, args[3], args[4], 0);
2287 }
2288 break;
2290 OP_32_64(extract):
2291 tgen_extract(s, args[0], args[1], args[2], args[3]);
2292 break;
2294 case INDEX_op_clz_i64:
2295 tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2296 break;
2298 case INDEX_op_mb:
2299 /* The host memory model is quite strong, we simply need to
2300 serialize the instruction stream. */
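/* BCR with a nonzero mask and register 0 never branches; it acts as a
   serializing no-op. Mask 14 is the cheaper form added by the
   fast-BCR-serialization facility, mask 15 the fully serializing one. */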
2301 if (args[0] & TCG_MO_ST_LD) {
2302 tcg_out_insn(s, RR, BCR,
2303 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
2304 }
2305 break;
2307 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2308 case INDEX_op_mov_i64:
2309 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2310 case INDEX_op_movi_i64:
2311 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2312 default:
2313 tcg_abort();
2314 }
2315 }
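/* Return the operand constraint set for the given opcode, choosing
   between variants according to the facilities available on the host. */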
2317 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2318 {
2319 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
2320 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
2321 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
2322 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
2323 static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
2324 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
2325 static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
2326 static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
2327 static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
2328 static const TCGTargetOpDef a2_r
2329 = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
2330 static const TCGTargetOpDef a2_ri
2331 = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
2332 static const TCGTargetOpDef a2_rA
2333 = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };
2335 switch (op) {
2336 case INDEX_op_goto_ptr:
2337 return &r;
2339 case INDEX_op_ld8u_i32:
2340 case INDEX_op_ld8u_i64:
2341 case INDEX_op_ld8s_i32:
2342 case INDEX_op_ld8s_i64:
2343 case INDEX_op_ld16u_i32:
2344 case INDEX_op_ld16u_i64:
2345 case INDEX_op_ld16s_i32:
2346 case INDEX_op_ld16s_i64:
2347 case INDEX_op_ld_i32:
2348 case INDEX_op_ld32u_i64:
2349 case INDEX_op_ld32s_i64:
2350 case INDEX_op_ld_i64:
2351 case INDEX_op_st8_i32:
2352 case INDEX_op_st8_i64:
2353 case INDEX_op_st16_i32:
2354 case INDEX_op_st16_i64:
2355 case INDEX_op_st_i32:
2356 case INDEX_op_st32_i64:
2357 case INDEX_op_st_i64:
2358 return &r_r;
2360 case INDEX_op_add_i32:
2361 case INDEX_op_add_i64:
2362 return &r_r_ri;
2363 case INDEX_op_sub_i32:
2364 case INDEX_op_sub_i64:
2365 case INDEX_op_and_i32:
2366 case INDEX_op_and_i64:
2367 case INDEX_op_or_i32:
2368 case INDEX_op_or_i64:
2369 case INDEX_op_xor_i32:
2370 case INDEX_op_xor_i64:
2371 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
2373 case INDEX_op_mul_i32:
2374 /* If we have the general-instruction-extensions, then we have
2375 MULTIPLY SINGLE IMMEDIATE with a signed 32-bit immediate; otherwise
2376 we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit one. */
2377 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
2378 case INDEX_op_mul_i64:
2379 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);
2381 case INDEX_op_shl_i32:
2382 case INDEX_op_shr_i32:
2383 case INDEX_op_sar_i32:
2384 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
2386 case INDEX_op_shl_i64:
2387 case INDEX_op_shr_i64:
2388 case INDEX_op_sar_i64:
2389 return &r_r_ri;
2391 case INDEX_op_rotl_i32:
2392 case INDEX_op_rotl_i64:
2393 case INDEX_op_rotr_i32:
2394 case INDEX_op_rotr_i64:
2395 return &r_r_ri;
2397 case INDEX_op_brcond_i32:
2398 case INDEX_op_brcond_i64:
2399 return &r_ri;
2401 case INDEX_op_bswap16_i32:
2402 case INDEX_op_bswap16_i64:
2403 case INDEX_op_bswap32_i32:
2404 case INDEX_op_bswap32_i64:
2405 case INDEX_op_bswap64_i64:
2406 case INDEX_op_neg_i32:
2407 case INDEX_op_neg_i64:
2408 case INDEX_op_ext8s_i32:
2409 case INDEX_op_ext8s_i64:
2410 case INDEX_op_ext8u_i32:
2411 case INDEX_op_ext8u_i64:
2412 case INDEX_op_ext16s_i32:
2413 case INDEX_op_ext16s_i64:
2414 case INDEX_op_ext16u_i32:
2415 case INDEX_op_ext16u_i64:
2416 case INDEX_op_ext32s_i64:
2417 case INDEX_op_ext32u_i64:
2418 case INDEX_op_ext_i32_i64:
2419 case INDEX_op_extu_i32_i64:
2420 case INDEX_op_extract_i32:
2421 case INDEX_op_extract_i64:
2422 return &r_r;
2424 case INDEX_op_clz_i64:
2425 case INDEX_op_setcond_i32:
2426 case INDEX_op_setcond_i64:
2427 return &r_r_ri;
2429 case INDEX_op_qemu_ld_i32:
2430 case INDEX_op_qemu_ld_i64:
2431 return &r_L;
2432 case INDEX_op_qemu_st_i64:
2433 case INDEX_op_qemu_st_i32:
2434 return &L_L;
2436 case INDEX_op_deposit_i32:
2437 case INDEX_op_deposit_i64:
2438 {
2439 static const TCGTargetOpDef dep
2440 = { .args_ct_str = { "r", "rZ", "r" } };
2441 return &dep;
2442 }
2443 case INDEX_op_movcond_i32:
2444 case INDEX_op_movcond_i64:
2445 {
2446 static const TCGTargetOpDef movc
2447 = { .args_ct_str = { "r", "r", "ri", "r", "0" } };
2448 static const TCGTargetOpDef movc_l
2449 = { .args_ct_str = { "r", "r", "ri", "rI", "0" } };
2450 return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
2451 }
2452 case INDEX_op_div2_i32:
2453 case INDEX_op_div2_i64:
2454 case INDEX_op_divu2_i32:
2455 case INDEX_op_divu2_i64:
2456 {
2457 static const TCGTargetOpDef div2
2458 = { .args_ct_str = { "b", "a", "0", "1", "r" } };
2459 return &div2;
2460 }
2461 case INDEX_op_mulu2_i64:
2462 {
2463 static const TCGTargetOpDef mul2
2464 = { .args_ct_str = { "b", "a", "0", "r" } };
2465 return &mul2;
2466 }
2468 case INDEX_op_add2_i32:
2469 case INDEX_op_sub2_i32:
2470 return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
2471 case INDEX_op_add2_i64:
2472 case INDEX_op_sub2_i64:
2473 return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);
2475 default:
2476 break;
2477 }
2478 return NULL;
2479 }
2481 static void query_s390_facilities(void)
2482 {
2483 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2485 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2486 is present on all 64-bit systems, but let's check for it anyway. */
2487 if (hwcap & HWCAP_S390_STFLE) {
2488 register int r0 __asm__("0");
2489 register void *r1 __asm__("1");
2491 /* stfle 0(%r1) */
2492 r1 = &s390_facilities;
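/* r0 = 0 asks STFLE to store a single doubleword of facility bits
   at 0(%r1), i.e. directly into s390_facilities. */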
2493 asm volatile(".word 0xb2b0,0x1000"
2494 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2498 static void tcg_target_init(TCGContext *s)
2499 {
2500 query_s390_facilities();
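/* All 16 general registers are available for both 32- and 64-bit values. */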
2502 tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
2503 tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
2505 tcg_target_call_clobber_regs = 0;
2506 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2507 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2508 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2510 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2511 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2512 /* The r6 register is technically call-saved, but it's also a parameter
2513 register, so it can get killed by setup for the qemu_st helper. */
2514 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
2515 /* The return register can be considered call-clobbered. */
2516 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2518 s->reserved_regs = 0;
2519 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2520 /* XXX many insns can't be used with R0, so we better avoid it for now */
2521 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2522 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2523 if (USE_REG_TB) {
2524 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
2525 }
2526 }
2528 #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2529 + TCG_STATIC_CALL_ARGS_SIZE \
2530 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2532 static void tcg_target_qemu_prologue(TCGContext *s)
2533 {
2534 /* stmg %r6,%r15,48(%r15) (save registers) */
2535 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2537 /* aghi %r15,-frame_size */
2538 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
2540 tcg_set_frame(s, TCG_REG_CALL_STACK,
2541 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2542 CPU_TEMP_BUF_NLONGS * sizeof(long));
2544 #ifndef CONFIG_SOFTMMU
2545 if (guest_base >= 0x80000) {
2546 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2547 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2548 }
2549 #endif
2551 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2552 if (USE_REG_TB) {
2553 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
2554 tcg_target_call_iarg_regs[1]);
2555 }
2557 /* br %r3 (go to TB) */
2558 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2560 /*
2561 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2562 * and fall through to the rest of the epilogue.
2563 */
2564 s->code_gen_epilogue = s->code_ptr;
2565 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
2567 /* TB epilogue */
2568 tb_ret_addr = s->code_ptr;
2570 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2571 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2572 FRAME_SIZE + 48);
2574 /* br %r14 (return) */
2575 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2576 }
2578 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2579 {
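/* 0x07 is the BCR opcode; a 0x0707 halfword is "bcr 0,%r7", a no-op. */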
2580 memset(p, 0x07, count * sizeof(tcg_insn_unit));
2581 }
2583 typedef struct {
2584 DebugFrameHeader h;
2585 uint8_t fde_def_cfa[4];
2586 uint8_t fde_reg_ofs[18];
2587 } DebugFrame;
2589 /* We're expecting a 2 byte uleb128 encoded value. */
2590 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2592 #define ELF_HOST_MACHINE EM_S390
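/* DWARF call-frame information for the prologue above, handed to
   tcg_register_jit_int below so that debuggers can unwind through
   the generated code. */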
2594 static const DebugFrame debug_frame = {
2595 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2596 .h.cie.id = -1,
2597 .h.cie.version = 1,
2598 .h.cie.code_align = 1,
2599 .h.cie.data_align = 8, /* sleb128 8 */
2600 .h.cie.return_column = TCG_REG_R14,
2602 /* Total FDE size does not include the "len" member. */
2603 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2605 .fde_def_cfa = {
2606 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2607 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2608 (FRAME_SIZE >> 7)
2609 },
2610 .fde_reg_ofs = {
2611 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2612 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2613 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2614 0x89, 9, /* DW_CFA_offset, %r9, 72 */
2615 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2616 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2617 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2618 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2619 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2620 }
2621 };
2623 void tcg_register_jit(void *buf, size_t buf_size)
2624 {
2625 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2626 }