/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "elf.h"

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_MULI  0x100
#define TCG_CT_CONST_ORI   0x200
#define TCG_CT_CONST_XORI  0x400
#define TCG_CT_CONST_CMPI  0x800
#define TCG_CT_CONST_ADLI  0x1000

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14
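
/* Note: R14 also serves as the link register for the BRASL/BASR call
   sequences emitted below.  It is assumed safe to double as a scratch
   register because no value is expected to survive in it across a call.  */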

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    NOP         = 0x0707,
} S390Opcode;
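
/* For example, RIL_AFI = 0xc209 packs the two opcode fields of the RIL
   format: 0xc2 is the major opcode byte and 0x9 the minor opcode nibble.
   tcg_out_insn_RIL() below ORs the 4-bit register field into the gap
   between them, so the enum value can be copied straight from the
   architecture tables.  */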

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
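
/* These are the 4-bit masks used by BRANCH ON CONDITION: bit value 8
   selects condition code 0, 4 selects CC 1, 2 selects CC 2 and 1 selects
   CC 3, so compound conditions are simply the OR of the single-CC masks
   and S390_CC_ALWAYS (15) accepts every condition code.  */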

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif

static tcg_insn_unit *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE   (1ULL << (63 - 2))
#define FACILITY_LONG_DISP      (1ULL << (63 - 18))
#define FACILITY_EXT_IMM        (1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT   (1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))
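
/* The facility numbers match the bit numbering used by STORE FACILITY
   LIST (EXTENDED), which counts bits from the most-significant end of
   each doubleword; hence the "63 - n" when building a host-side mask.  */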

static uint64_t facilities;

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    tcg_debug_assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        tcg_abort();
        break;
    }
}
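
/* tcg_insn_unit is 2 bytes wide on s390, so the pointer subtraction above
   yields a halfword displacement, which is what the PC-relative branch
   formats expect.  The "code_ptr - 1" reflects that the relocation is
   recorded at the displacement field, one insn unit past the start of the
   instruction; the asserted -2 addend matches the byte offset passed by
   every tcg_out_reloc() call in this file.  */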

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

static int tcg_match_ori(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}

/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(TCGType type, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (type == TCG_TYPE_I32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}

/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}

/* Immediates to be used with add2/sub2.  */

static int tcg_match_add2i(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            return 1;
        } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
            return 1;
        }
    }
    return 0;
}

/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
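
/* For example, tcg_out_insn(s, RIL, LARL, ret, off) expands to
   tcg_out_insn_RIL(s, RIL_LARL, ret, off): the format tag both selects
   the emitter function and is pasted onto the opcode name, so using an
   opcode with the wrong format fails to compile.  */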

/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
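
/* As a worked example (assuming the extended-immediate facility is present
   and the PC-relative LARL case does not happen to match), the constant
   0x123456789abcdef0 has non-zero bits in both halves, so none of the
   single-insn forms apply; the recursion emits LLILF ret,0x9abcdef0 for
   the low half and then IIHF ret,0x12345678 to insert the high half.  */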

/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
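
/* The xor/subtract pair sign-extends the low 20 bits of the offset.  For
   example, ofs = 0x123456 yields low = 0x23456 with 0x100000 loaded into
   TCG_TMP0, while an offset whose low 20 bits look negative, such as
   0xfffff, becomes low = -1 with the remainder folded into the movi.  */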

/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}

static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

/* Accept bit patterns like these:
       0....01....1
       1....10....0
       1..10..01..1
       0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
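
/* For example, c = 0x00ffff00 is accepted: the first transition is at
   bit 8 and the second at bit 24, with ones in between and nothing set
   beyond, i.e. a single contiguous (possibly wrapped) field, which is
   exactly the shape RISBG can select.  */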

static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
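
/* RISBG numbers bits from 0 (MSB) to 63 (LSB).  For val = 0x00ffff00 the
   non-wrapped path computes msb = 40 and lsb = 55, selecting exactly the
   sixteen one bits; the final "zero" operand clears everything outside
   the selected range, which is what implements the AND.  */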

static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}

static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and mask in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}
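
/* XILF/XIHF exist only with the extended-immediate facility; this agrees
   with tcg_match_xori() above, which rejects every XOR immediate when that
   facility is absent, so this function should never be reached in that
   case.  The "val >> 31 >> 1" splits a 32-bit shift in two steps, a
   pattern that stays well-defined even for a 32-bit type; presumably a
   holdover from when this backend also supported 31-bit mode.  */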

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move.  */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT.  */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X.  */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}

bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}

static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0x7ffff);

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    int a_bits = get_alignment_bits(opc);
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if (a_bits >= 0) {
        /* A byte access or an alignment check required */
        a_off = 0;
        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    } else {
        a_off = (1 << (opc & MO_SIZE)) - 1;
        tlb_mask = TARGET_PAGE_MASK;
    }

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
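
/* With the general-instruction-extension facility, the single RISBG above
   both extracts the TLB index bits from the guest address and rotates them
   into place as a byte offset into the TLB table; without it, the same
   result takes a shift followed by an AND with the index mask.  */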

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    S390Opcode op;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* branch displacement must be aligned for atomic patching;
             * see if we need to add extra nop before branch
             */
            if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
                tcg_out16(s, NOP);
            }
            tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
            s->code_ptr += 2;
        } else {
            /* load address stored at s->tb_jmp_target_addr + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
                           s->tb_jmp_target_addr + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        }
        tcg_out_insn(s, RR, SR, args[0], args[2]);
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
        } else {
            tcg_out_insn(s, RR, NR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i32:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, OR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
    do_shift32:
        if (const_args[2]) {
            tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh32(s, op, args[0], args[2], 0);
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(bswap16):
        /* The TCG bswap definition requires bits 0-47 already be zero.
           Thus we don't need the G-type insns to implement bswap16_i64.  */
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
        break;
    OP_32_64(bswap32):
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;

    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;
1952 case INDEX_op_qemu_ld_i32:
1953 /* ??? Technically we can use a non-extending instruction. */
1954 case INDEX_op_qemu_ld_i64:
1955 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
1956 break;
1957 case INDEX_op_qemu_st_i32:
1958 case INDEX_op_qemu_st_i64:
1959 tcg_out_qemu_st(s, args[0], args[1], args[2]);
1960 break;
1962 case INDEX_op_ld16s_i64:
1963 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1964 break;
1965 case INDEX_op_ld32u_i64:
1966 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1967 break;
1968 case INDEX_op_ld32s_i64:
1969 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1970 break;
1971 case INDEX_op_ld_i64:
1972 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1973 break;
1975 case INDEX_op_st32_i64:
1976 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1977 break;
1978 case INDEX_op_st_i64:
1979 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1980 break;
1982 case INDEX_op_add_i64:
1983 a0 = args[0], a1 = args[1], a2 = args[2];
1984 if (const_args[2]) {
1985 do_addi_64:
1986 if (a0 == a1) {
1987 if (a2 == (int16_t)a2) {
1988 tcg_out_insn(s, RI, AGHI, a0, a2);
1989 break;
1991 if (facilities & FACILITY_EXT_IMM) {
1992 if (a2 == (int32_t)a2) {
1993 tcg_out_insn(s, RIL, AGFI, a0, a2);
1994 break;
1995 } else if (a2 == (uint32_t)a2) {
1996 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1997 break;
1998 } else if (-a2 == (uint32_t)-a2) {
1999 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2000 break;
2004 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2005 } else if (a0 == a1) {
2006 tcg_out_insn(s, RRE, AGR, a0, a2);
2007 } else {
2008 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2010 break;
2011 case INDEX_op_sub_i64:
2012 a0 = args[0], a1 = args[1], a2 = args[2];
2013 if (const_args[2]) {
2014 a2 = -a2;
2015 goto do_addi_64;
2016 } else {
2017 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
2019 break;
2021 case INDEX_op_and_i64:
2022 if (const_args[2]) {
2023 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2024 } else {
2025 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2027 break;
2028 case INDEX_op_or_i64:
2029 if (const_args[2]) {
2030 tgen64_ori(s, args[0], args[2]);
2031 } else {
2032 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2034 break;
2035 case INDEX_op_xor_i64:
2036 if (const_args[2]) {
2037 tgen64_xori(s, args[0], args[2]);
2038 } else {
2039 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2041 break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;
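    /* The divide and widening-multiply cases below use instructions
       with an implicit even/odd register pair; the "a"/"b" and
       "0"/"1" constraints in s390_op_defs pin the TCG operands to
       R2/R3, so the pair is already in place when we get here.  */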

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but since we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead would require jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;
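    /* In the double-word arithmetic below, ALGFI/SLGFI only take a
       32-bit unsigned immediate, so a negative low-part constant is
       handled by the opposite operation on its negation; the "rA"
       (TCG_CT_CONST_ADLI) constraint is what guarantees one of the
       two forms fits.  */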

    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
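
/* Operand constraint key (see target_parse_constraint earlier in this
   file): "r" is any general register, "0"/"1" tie an input to the
   matching output, "i" is any immediate, and "R" is any register
   except R0, used for shift and rotate counts where R0 in the
   instruction encoding means "no register".  "L" is a register usable
   for qemu_ld/st addresses (under CONFIG_SOFTMMU it excludes the
   registers clobbered while setting up the slow-path call), and
   "a"/"b" pin an operand to R2/R3 for the instructions with implicit
   register pairs.  The remaining capital letters map to the
   TCG_CT_CONST_* checks: A (ADLI), C (CMPI), K (MULI), O (ORI),
   X (XORI).  */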
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { -1 },
};
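
/* Query the host's facility list via STORE FACILITY LIST EXTENDED.
   The instruction is emitted as raw machine code (.word 0xb2b0 is the
   STFLE opcode, 0x1000 encodes the operand 0(%r1)), presumably so the
   file still builds with assemblers that do not know the mnemonic.
   On input, r0 = 0 asks for a single doubleword of facility bits,
   which is stored into the "facilities" global.  */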
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}
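
/* The frame is the ABI-mandated register save area
   (TCG_TARGET_CALL_STACK_OFFSET, 160 bytes on s390x), plus space for
   outgoing helper arguments and the TCG temporary buffer.  */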
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
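
/* On entry from tcg_qemu_tb_exec, the first call argument carries the
   CPU env pointer (moved into TCG_AREG0 below) and the second the
   address of the translation block to run; tb_ret_addr records where
   translation blocks return, so exit_tb branches back into the
   epilogue.  */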
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
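    /* A guest_base below 0x80000 fits in the 20-bit signed displacement
       of the long-displacement memory forms, so it can be folded into
       each access; only larger values need a dedicated base register.  */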
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE  EM_S390
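
/* Each fde_reg_ofs entry is a DW_CFA_offset: (0x80 | regno) followed
   by the save-slot offset divided by data_align (8), so e.g.
   "0x86, 6" describes %r6 saved 48 bytes into the frame, matching the
   STMG in the prologue.  Two uleb128 bytes hold 14 bits of payload,
   hence the QEMU_BUILD_BUG_ON limit on FRAME_SIZE above.  */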
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}