/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "tcg-pool.inc.c"
#include "elf.h"
/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_S16   0x100
#define TCG_CT_CONST_S32   0x200
#define TCG_CT_CONST_S33   0x400
#define TCG_CT_CONST_ZERO  0x800
/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0
/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R1
/* A scratch register that holds a pointer to the beginning of the TB.
   We don't need this when we have pc-relative loads with the general
   instructions extension facility.  */
#define TCG_REG_TB      TCG_REG_R12
#define USE_REG_TB      (!(s390_facilities & FACILITY_GEN_INST_EXT))

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
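/* Illustrative example (editorial note, not in the upstream source):
   RIL_BRCL = 0xc004 packs the major opcode byte 0xc0 (insn bits 0-7)
   and the minor nibble 0x4 (insn bits 12-15) into one value; the
   R1/M1 field sits between them, which is why tcg_out_insn_RIL()
   below emits "op | (r1 << 4)" to re-split the two halves.  */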
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_LOCGHI  = 0xec46,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,
    RRF_NRK     = 0xb9f4,
    RRF_NGRK    = 0xb9e4,
    RRF_ORK     = 0xb9f6,
    RRF_OGRK    = 0xb9e6,
    RRF_SRK     = 0xb9f9,
    RRF_SGRK    = 0xb9e9,
    RRF_SLRK    = 0xb9fb,
    RRF_SLGRK   = 0xb9eb,
    RRF_XRK     = 0xb9f7,
    RRF_XGRK    = 0xb9e7,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    NOP         = 0x0707,
} S390Opcode;
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};
/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
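/* Illustrative note (editorial): for an unsigned compare against zero,
   "X < 0" can never be true and "X >= 0" is always true, hence
   S390_CC_NEVER and S390_CC_ALWAYS above; "X <= 0" and "X > 0"
   collapse to the EQ/NE outcomes of the sign-agnostic test.  */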
#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif

static tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities;
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - code_ptr;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(code_ptr, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(code_ptr, pcrel2);
            return true;
        }
        break;
    case R_390_20:
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)code_ptr & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(code_ptr, old);
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
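/* Illustrative example (editorial, assuming the standard z/Arch field
   names): an R_390_20 value of 0x12345 is stored as DL = 0x345 (the
   low 12 bits) and DH = 0x12 (the next 8 bits), two separate fields of
   the RXY/RSY encoding, which is what the shift-and-mask arithmetic
   above scatters the value into.  */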
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_S33;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_S16) {
        return val == (int16_t)val;
    } else if (ct & TCG_CT_CONST_S32) {
        return val == (int32_t)val;
    } else if (ct & TCG_CT_CONST_S33) {
        return val >= -0xffffffffll && val <= 0xffffffffll;
    } else if (ct & TCG_CT_CONST_ZERO) {
        return val == 0;
    }

    return 0;
}
/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
                             int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
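/* Illustrative expansion (editorial note):
       tcg_out_insn(s, RIL, LARL, ret, off)
   becomes
       tcg_out_insn_RIL(s, RIL_LARL, ret, off)
   so pairing a format with an opcode it does not have fails to
   compile, because the glued enumerator name does not exist.  */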
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
    return true;
}
static const S390Opcode lli_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};

static bool maybe_out_small_movi(TCGContext *s, TCGType type,
                                 TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return true;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return true;
        }
    }

    return false;
}
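/* Illustrative examples (editorial): -42 fits the signed 16-bit LGHI
   path; 0x12340000 has all of its set bits within one halfword and
   loads with a single LLILH 0x1234; 0x12340001 spans two halfwords, so
   the function returns false and the caller must use a longer
   sequence.  */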
/* load a register with an immediate value */
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long sval, bool in_prologue)
{
    tcg_target_ulong uval;

    /* Try all 32-bit insns that can load it in one go.  */
    if (maybe_out_small_movi(s, type, ret, sval)) {
        return;
    }

    uval = sval;
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
            return;
        }
    }

    /* Try for PC-relative address load.  For odd addresses,
       attempt to use an offset from the start of the TB.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    } else if (USE_REG_TB && !in_prologue) {
        ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr;
        if (off == sextract64(off, 0, 20)) {
            /* This is certain to be an address within TB, and therefore
               OFF will be negative; don't try RX_LA.  */
            tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
            return;
        }
    }

    /* A 32-bit unsigned value can be loaded in 2 insns.  And given
       that LLILL, LLIHL, LLILF above did not succeed, we know that
       both insns are required.  */
    if (uval <= 0xffffffff) {
        tcg_out_insn(s, RI, LLILL, ret, uval);
        tcg_out_insn(s, RI, IILH, ret, uval >> 16);
        return;
    }

    /* Otherwise, stuff it in the constant pool.  */
    if (s390_facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_insn(s, RIL, LGRL, ret, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
    } else if (USE_REG_TB && !in_prologue) {
        tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        TCGReg base = ret ? ret : TCG_TMP0;
        tcg_out_insn(s, RIL, LARL, base, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
        tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_out_movi_int(s, type, ret, sval, false);
}
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
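/* Illustrative example (editorial): ofs = 0x123456 is outside the
   20-bit signed displacement range, so the sign-extension trick above
   yields low = 0x23456; 0x100000 is loaded into TCG_TMP0 and used as
   an index, while 0x23456 remains as the in-insn long displacement.  */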
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }
    if (USE_REG_TB) {
        ptrdiff_t disp = abs - (void *)s->code_gen_ptr;
        if (disp == sextract64(disp, 0, 20)) {
            tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
/* Accept bit patterns like these:
       0....01....1
       1....10....0
       1..10..01..1
       0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
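/* Illustrative walk-through (editorial): c = 0x0ff0 has one run of
   ones; the first transition gives lsb = 0x10, and after inverting and
   erasing it the remaining set bits equal -lsb of the second
   transition, so the mask is accepted.  c = 0x0f0f has a second run of
   ones and is rejected.  */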
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
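/* Illustrative example (editorial): for the wraparound mask
   val = 0xffff00000000ffff, ~val = 0x0000ffffffff0000, giving
   msb = 64 - 16 = 48 and lsb = 16 - 1 = 15.  A RISBG start bit greater
   than the end bit selects bits 48..63 and 0..15 of the value, which
   is exactly this mask.  */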
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (s390_facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (USE_REG_TB) {
        if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
            tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
            new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
                           -(intptr_t)s->code_gen_ptr);
            return;
        }
    } else {
        tcg_out_movi(s, type, TCG_TMP0, val);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (unlikely(val == 0)) {
        return;
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = (0xffffull << i*16);
        if ((val & mask) != 0 && (val & ~mask) == 0) {
            tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
        tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
        tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
    }
}
static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        if ((val & 0xffffffff00000000ull) == 0) {
            tcg_out_insn(s, RIL, XILF, dest, val);
            return;
        }
        if ((val & 0x00000000ffffffffull) == 0) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
            return;
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        /* Perform the xor by parts.  */
        tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
        if (val & 0xffffffff) {
            tcg_out_insn(s, RIL, XILF, dest, val);
        }
        if (val > 0xffffffff) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        }
    }
}
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    bool is_unsigned = is_unsigned_cond(c);
    S390Opcode op;

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (s390_facilities & FACILITY_EXT_IMM) {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLFI : RIL_CFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) {
                op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            }
        }

        /* Use the constant pool, but not for small constants.  */
        if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
            c2 = TCG_TMP0;
            /* fall through to reg-reg */
        } else if (USE_REG_TB) {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RXY_CLY : RXY_CY);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
                               4 - (intptr_t)s->code_gen_ptr);
            } else {
                op = (is_unsigned ? RXY_CLG : RXY_CG);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
                               -(intptr_t)s->code_gen_ptr);
            }
            goto exit;
        } else {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLRL : RIL_CRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
                               s->code_ptr - 2, 2 + 4);
            } else {
                op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
            }
            goto exit;
        }
    }

    if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    return tcg_cond_to_s390_cond[c];
}
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;
    bool have_loc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (s390_facilities & FACILITY_LOAD_ON_COND2) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
        return;
    }

    have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;

    /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller.  */
 restart:
    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /* As above, but we're looking for borrow, or !carry.
           The second insn computes d - d - borrow, or -1 for true
           and 0 for false.  So we must mask to 1 bit afterward.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        tgen_andi(s, type, dest, 1);
        return;

    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (c2const) {
            if (have_loc) {
                break;
            }
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        cond = tcg_swap_cond(cond);
        goto restart;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    if (have_loc) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
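/* Illustrative note (editorial): for setcond GTU the sequence above is
   a logical compare, then load 0 into dest, then ALCGR dest,dest.
   After the compare, CC is 2 exactly when c1 > c2, and ALCGR computes
   0 + 0 + carry with the carry taken from (CC & 2), leaving dest = 1
   for true and 0 for false without any branch.  */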
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const)
{
    int cc;
    if (s390_facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
        if (v3const) {
            tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
        } else {
            tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
        }
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, v3);
    }
}
static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
    } else {
        if (a2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
        }
        if (s390_facilities & FACILITY_LOAD_ON_COND) {
            /* Emit: if (one bit found) dest = r0.  */
            tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
        } else {
            /* Emit: if (no one bit found) goto over; dest = r0; over:  */
            tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
            tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
        }
    }
}
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}

static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}
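/* Illustrative example (editorial, using the big-endian bit numbering
   of RISBG where bit 0 is the MSB): depositing a 16-bit field at
   ofs = 8 gives lsb = 55 and msb = 40, i.e. bits 40-55 of the source
   rotated left by 8.  Extracting the same field uses msb = 48,
   lsb = 63 and a rotation of 64 - 8 = 56 (a right-rotation by 8), with
   the zero flag set so everything outside the field is cleared.  */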
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off = 0;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
        tcg_debug_assert(off == (int16_t)off);
    } else {
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off = 0;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
        tcg_debug_assert(off == (int16_t)off);
    } else {
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (s390_facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}
static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"

/* We're expecting to use a 20-bit signed offset on the tlb memory ops.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_mask[NB_MMU_MODES - 1])
                  > 0x7ffff);
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1])
                  > 0x7ffff);

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_mask = (1 << s_bits) - 1;
    unsigned a_mask = (1 << a_bits) - 1;
    int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
    int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
    int ofs, a_off;
    uint64_t tlb_mask;

    tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
    tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
    if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) {
        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
    } else {
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUTLBEntry, addr_read);
    } else {
        ofs = offsetof(CPUTLBEntry, addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
    } else {
        tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
    }

    tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE,
                 offsetof(CPUTLBEntry, addend));

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
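/* Illustrative example (editorial): for a 4-byte load with no
   alignment requirement, s_bits = 2 and a_bits = 0, so a_off = 3 and
   tlb_mask = TARGET_PAGE_MASK.  The LA adds 3 so the comparison uses
   the address of the access's last byte; an access that would cross a
   page boundary then fails the TLB compare and takes the slow path.  */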
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)s->code_ptr, 2)) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)s->code_ptr, 2)) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
1753 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1754 const TCGArg *args, const int *const_args)
1756 S390Opcode op, op2;
1757 TCGArg a0, a1, a2;
1759 switch (opc) {
1760 case INDEX_op_exit_tb:
1761 /* Reuse the zeroing that exists for goto_ptr. */
1762 a0 = args[0];
1763 if (a0 == 0) {
1764 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
1765 } else {
1766 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1767 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1769 break;
1771 case INDEX_op_goto_tb:
1772 a0 = args[0];
1773 if (s->tb_jmp_insn_offset) {
1774 /* branch displacement must be aligned for atomic patching;
1775 * see if we need to add extra nop before branch
1777 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1778 tcg_out16(s, NOP);
1780 tcg_debug_assert(!USE_REG_TB);
1781 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1782 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1783 s->code_ptr += 2;
1784 } else {
1785 /* load address stored at s->tb_jmp_target_addr + a0 */
1786 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
1787 s->tb_jmp_target_addr + a0);
1788 /* and go there */
1789 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
1791 set_jmp_reset_offset(s, a0);
1793 /* For the unlinked path of goto_tb, we need to reset
1794 TCG_REG_TB to the beginning of this TB. */
1795 if (USE_REG_TB) {
1796 int ofs = -tcg_current_code_size(s);
1797 assert(ofs == (int16_t)ofs);
1798 tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs);
1800 break;
1802 case INDEX_op_goto_ptr:
1803 a0 = args[0];
1804 if (USE_REG_TB) {
1805 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
1807 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
1808 break;
1810 OP_32_64(ld8u):
1811 /* ??? LLC (RXY format) is only present with the extended-immediate
1812 facility, whereas LLGC is always present. */
1813 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1814 break;
1816 OP_32_64(ld8s):
1817 /* ??? LB is no smaller than LGB, so no point to using it. */
1818 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1819 break;
1821 OP_32_64(ld16u):
1822 /* ??? LLH (RXY format) is only present with the extended-immediate
1823 facility, whereas LLGH is always present. */
1824 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1825 break;
1827 case INDEX_op_ld16s_i32:
1828 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1829 break;
1831 case INDEX_op_ld_i32:
1832 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1833 break;
1835 OP_32_64(st8):
1836 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1837 TCG_REG_NONE, args[2]);
1838 break;
1840 OP_32_64(st16):
1841 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1842 TCG_REG_NONE, args[2]);
1843 break;
1845 case INDEX_op_st_i32:
1846 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1847 break;
1849 case INDEX_op_add_i32:
1850 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1851 if (const_args[2]) {
1852 do_addi_32:
1853 if (a0 == a1) {
1854 if (a2 == (int16_t)a2) {
1855 tcg_out_insn(s, RI, AHI, a0, a2);
1856 break;
1858 if (s390_facilities & FACILITY_EXT_IMM) {
1859 tcg_out_insn(s, RIL, AFI, a0, a2);
1860 break;
1863 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1864 } else if (a0 == a1) {
1865 tcg_out_insn(s, RR, AR, a0, a2);
1866 } else {
1867 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1869 break;
1870 case INDEX_op_sub_i32:
1871 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1872 if (const_args[2]) {
1873 a2 = -a2;
1874 goto do_addi_32;
1875 } else if (a0 == a1) {
1876 tcg_out_insn(s, RR, SR, a0, a2);
1877 } else {
1878 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
1880 break;
1882 case INDEX_op_and_i32:
1883 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1884 if (const_args[2]) {
1885 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1886 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1887 } else if (a0 == a1) {
1888 tcg_out_insn(s, RR, NR, a0, a2);
1889 } else {
1890 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
1892 break;
1893 case INDEX_op_or_i32:
1894 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1895 if (const_args[2]) {
1896 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1897 tgen_ori(s, TCG_TYPE_I32, a0, a2);
1898 } else if (a0 == a1) {
1899 tcg_out_insn(s, RR, OR, a0, a2);
1900 } else {
1901 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
1903 break;
1904 case INDEX_op_xor_i32:
1905 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
1906 if (const_args[2]) {
1907 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1908 tgen_xori(s, TCG_TYPE_I32, a0, a2);
1909 } else if (a0 == a1) {
1910 tcg_out_insn(s, RR, XR, args[0], args[2]);
1911 } else {
1912 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
1914 break;
1916 case INDEX_op_neg_i32:
1917 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1918 break;
1920 case INDEX_op_mul_i32:
1921 if (const_args[2]) {
1922 if ((int32_t)args[2] == (int16_t)args[2]) {
1923 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1924 } else {
1925 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1927 } else {
1928 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1930 break;
1932 case INDEX_op_div2_i32:
1933 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1934 break;
1935 case INDEX_op_divu2_i32:
1936 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1937 break;
1939 case INDEX_op_shl_i32:
1940 op = RS_SLL;
1941 op2 = RSY_SLLK;
1942 do_shift32:
1943 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1944 if (a0 == a1) {
1945 if (const_args[2]) {
1946 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1947 } else {
1948 tcg_out_sh32(s, op, a0, a2, 0);
1950 } else {
1951 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1952 if (const_args[2]) {
1953 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1954 } else {
1955 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1958 break;
1959 case INDEX_op_shr_i32:
1960 op = RS_SRL;
1961 op2 = RSY_SRLK;
1962 goto do_shift32;
1963 case INDEX_op_sar_i32:
1964 op = RS_SRA;
1965 op2 = RSY_SRAK;
1966 goto do_shift32;
1968 case INDEX_op_rotl_i32:
1969 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1970 if (const_args[2]) {
1971 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1972 } else {
1973 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1975 break;
1976 case INDEX_op_rotr_i32:
1977 if (const_args[2]) {
1978 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1979 TCG_REG_NONE, (32 - args[2]) & 31);
1980 } else {
1981 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1982 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1984 break;
1986 case INDEX_op_ext8s_i32:
1987 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1988 break;
1989 case INDEX_op_ext16s_i32:
1990 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1991 break;
1992 case INDEX_op_ext8u_i32:
1993 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1994 break;
1995 case INDEX_op_ext16u_i32:
1996 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1997 break;
1999 OP_32_64(bswap16):
2000 /* The TCG bswap definition requires bits 0-47 already be zero.
2001 Thus we don't need the G-type insns to implement bswap16_i64. */
2002 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2003 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
2004 break;
2005 OP_32_64(bswap32):
2006 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2007 break;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

    case INDEX_op_qemu_ld_i32:
        /* ??? Technically we can use a non-extending instruction.  */
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
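
    /* add_i64 tries progressively wider immediates (16-bit AGHI, then
       the 32-bit extended-immediate forms) before falling back to LOAD
       ADDRESS, which sums a1 + a2 into a0 without clobbering either
       input.  */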
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (s390_facilities & FACILITY_EXT_IMM) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, SGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_ori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, OGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_xori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, XGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition; but since we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead would require jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
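    /* MLGR forms the full 128-bit product in an even/odd register pair;
       with the R2/R3 constraint in tcg_target_op_def, R2 should receive
       the high half and R3 the low half.  */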
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

    OP_32_64(deposit):
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            tgen_deposit(s, a0, a2, args[3], args[4], 1);
        } else {
            /* Since we can't support "0Z" as a constraint, we allow a1 in
               any register.  Fix things up as if it were a matching
               constraint.  */
            if (a0 != a1) {
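                /* Relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1.  */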
                TCGType type = (opc == INDEX_op_deposit_i64);
                if (a0 == a2) {
                    tcg_out_mov(s, type, TCG_TMP0, a2);
                    a2 = TCG_TMP0;
                }
                tcg_out_mov(s, type, a0, a1);
            }
            tgen_deposit(s, a0, a2, args[3], args[4], 0);
        }
        break;

    OP_32_64(extract):
        tgen_extract(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_clz_i64:
        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
        break;

    case INDEX_op_mb:
        /* The host memory model is quite strong; we simply need to
           serialize the instruction stream.  */
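        /* ??? Mask 14 is the fast-BCR-serialization form, mask 15 the
           plain serializing BCR that every model provides.  */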
        if (args[0] & TCG_MO_ST_LD) {
            tcg_out_insn(s, RR, BCR,
                         s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
        }
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
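
/* Return the operand constraints for each opcode, or NULL.  */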
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
    static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
    static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
    static const TCGTargetOpDef a2_r
        = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
    static const TCGTargetOpDef a2_ri
        = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
    static const TCGTargetOpDef a2_rA
        = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        return &r_r_ri;
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_mul_i32:
        /* If we have the general-instruction-extensions, then we have
           MULTIPLY SINGLE IMMEDIATE with a signed 32-bit immediate;
           otherwise we have only MULTIPLY HALFWORD IMMEDIATE, with a
           signed 16-bit immediate.  */
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
    case INDEX_op_mul_i64:
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_ri;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
        return &r_r;

    case INDEX_op_clz_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return &r_r_ri;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return &r_L;
    case INDEX_op_qemu_st_i64:
    case INDEX_op_qemu_st_i32:
        return &L_L;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        {
            static const TCGTargetOpDef dep
                = { .args_ct_str = { "r", "rZ", "r" } };
            return &dep;
        }
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        {
            static const TCGTargetOpDef movc
                = { .args_ct_str = { "r", "r", "ri", "r", "0" } };
            static const TCGTargetOpDef movc_l
                = { .args_ct_str = { "r", "r", "ri", "rI", "0" } };
            return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
        }
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        {
            static const TCGTargetOpDef div2
                = { .args_ct_str = { "b", "a", "0", "1", "r" } };
            return &div2;
        }
    case INDEX_op_mulu2_i64:
        {
            static const TCGTargetOpDef mul2
                = { .args_ct_str = { "b", "a", "0", "r" } };
            return &mul2;
        }

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);

    default:
        break;
    }
    return NULL;
}
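
/* STFLE stores the facility list at the address in %r1; %r0 holds the
   number of doublewords to store, minus one.  Passing 0 in %r0 thus
   fills in exactly the one doubleword that s390_facilities provides.  */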
static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) */
        r1 = &s390_facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}

static void tcg_target_init(TCGContext *s)
{
    query_s390_facilities();

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we had better avoid it
       for now.  */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }
}
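
/* The TCG frame is the ABI-mandated register save area (I believe 160
   bytes on s390x), plus room for outgoing helper arguments and the TCG
   temporary buffer.  */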
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
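    /* A guest_base under 0x80000 should fit in the 20-bit signed
       displacement of the long-displacement loads and stores, so only
       larger bases need a dedicated register.  */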
    if (guest_base >= 0x80000) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
                    tcg_target_call_iarg_regs[1]);
    }

    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
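    /* 0x0707 is "bcr 0,%r7": a branch that is never taken, i.e. a nop.  */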
    memset(p, 0x07, count * sizeof(tcg_insn_unit));
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE  EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}